{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"CLIP-FSAR","owner":"alibaba-mmai-research","isFork":false,"description":"Code for our IJCV 2023 paper \"CLIP-guided Prototype Modulating for Few-shot Action Recognition\".","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":38,"forksCount":5,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-07T03:40:58.728Z"}},{"type":"Public","name":"MoLo","owner":"alibaba-mmai-research","isFork":false,"description":"Code for our CVPR 2023 paper \"MoLo: Motion-augmented Long-short Contrastive Learning for Few-shot Action Recognition\".","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":34,"forksCount":7,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-07T03:38:24.434Z"}},{"type":"Public","name":"CapFSAR","owner":"alibaba-mmai-research","isFork":false,"description":"","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-20T07:52:33.544Z"}},{"type":"Public","name":"DiST","owner":"alibaba-mmai-research","isFork":false,"description":"ICCV2023: Disentangling Spatial and Temporal Learning for Efficient Image-to-Video Transfer Learning","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":29,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-25T03:37:39.388Z"}},{"type":"Public","name":"TAdaConv","owner":"alibaba-mmai-research","isFork":false,"description":"[ICLR 2022] TAda! Temporally-Adaptive Convolutions for Video Understanding. \nThis codebase provides solutions for video classification, video representation learning and temporal detection. ","allTopics":["video-understanding","video-classification","action-localization","tadaconv","pytorch","action-recognition","self-supervised-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":217,"forksCount":29,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-23T09:24:39.853Z"}},{"type":"Public","name":"HyRSMPlusPlus","owner":"alibaba-mmai-research","isFork":false,"description":"Code for our paper \"HyRSM++: Hybrid Relation Guided Temporal Set Matching for Few-shot Action Recognition\".","allTopics":["temporal-coherence","set-matching","few-shot-action-recognition"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":12,"forksCount":2,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-01-03T14:22:36.559Z"}},{"type":"Public","name":"HyRSM","owner":"alibaba-mmai-research","isFork":false,"description":"Code for our CVPR 2022 Paper \"Hybrid Relation Guided Set Matching for Few-shot Action Recognition\".","allTopics":["set-matching","few-shot-action-recognition"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":15,"starsCount":25,"forksCount":4,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-01-03T14:21:53.176Z"}},{"type":"Public","name":"Masked-Action-Recognition","owner":"alibaba-mmai-research","isFork":false,"description":"Official code for the paper: MAR: Masked Autoencoders for Efficient Action \nRecognition","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":28,"forksCount":3,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-12-07T04:01:16.793Z"}},{"type":"Public","name":"HiCo","owner":"alibaba-mmai-research","isFork":false,"description":"CVPR2022:Learning from Untrimmed Videos: Self-Supervised Video Representation Learning with Hierarchical Consistency","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":15,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-08-10T02:59:52.721Z"}},{"type":"Public","name":"pytorch-video-understanding","owner":"alibaba-mmai-research","isFork":false,"description":"Moved to TAdaConv.","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-01-29T03:45:20.189Z"}}],"repositoryCount":10,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}