{"payload":{"pageCount":2,"repositories":[{"type":"Public","name":"CuMo","owner":"SHI-Labs","isFork":false,"description":"CuMo: Scaling Multimodal LLM with Co-Upcycled Mixture-of-Experts","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":56,"forksCount":3,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,17],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-11T18:45:48.828Z"}},{"type":"Public","name":"VCoder","owner":"SHI-Labs","isFork":false,"description":"VCoder: Versatile Vision Encoders for Multimodal Large Language Models, arXiv 2023 / CVPR 2024","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":2,"starsCount":235,"forksCount":12,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,3,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-17T05:09:38.772Z"}},{"type":"Public","name":"StyleNAT","owner":"SHI-Labs","isFork":false,"description":"New flexible and efficient image generation framework that sets new SOTA on FFHQ-256 with FID 2.05, 2022","topicNames":["gan","image-generation","neighborhood-attention"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":97,"forksCount":9,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-07T04:13:06.759Z"}},{"type":"Public","name":"Smooth-Diffusion","owner":"SHI-Labs","isFork":false,"description":"Smooth Diffusion: Crafting Smooth Latent Spaces in Diffusion Models arXiv 2023 / CVPR 2024","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":266,"forksCount":8,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,8,4,0,0,0,0,0,0,0,0,0,0,0,0,0,6,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-25T11:51:53.288Z"}},{"type":"Public","name":"Neighborhood-Attention-Transformer","owner":"SHI-Labs","isFork":false,"description":"Neighborhood Attention Transformer, arxiv 2022 / CVPR 2023. 
Dilated Neighborhood Attention Transformer, arxiv 2022","topicNames":["pytorch","neighborhood-attention"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":1001,"forksCount":81,"license":"MIT License","participation":[1,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-05T22:01:30.100Z"}},{"type":"Public","name":"Rethinking-Text-Segmentation","owner":"SHI-Labs","isFork":false,"description":"[CVPR 2021] Rethinking Text Segmentation: A Novel Dataset and A Text-Specific Refinement Approach","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":7,"starsCount":237,"forksCount":26,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-02T20:46:36.762Z"}},{"type":"Public","name":"Matting-Anything","owner":"SHI-Labs","isFork":false,"description":"Matting Anything Model (MAM), an efficient and versatile framework for estimating the alpha matte of any instance in an image with flexible and interactive visual or linguistic user prompt guidance.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":541,"forksCount":40,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-18T04:27:11.070Z"}},{"type":"Public","name":"Prompt-Free-Diffusion","owner":"SHI-Labs","isFork":false,"description":"Prompt-Free Diffusion: Taking \"Text\" out of Text-to-Image Diffusion Models, arxiv 2023 / CVPR 2024","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":10,"starsCount":708,"forksCount":35,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-16T03:42:55.222Z"}},{"type":"Public","name":"VIM","owner":"SHI-Labs","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":49,"forksCount":4,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-08T01:48:33.705Z"}},{"type":"Public","name":"Versatile-Diffusion","owner":"SHI-Labs","isFork":false,"description":"Versatile Diffusion: Text, Images and Variations All in One Diffusion Model, arXiv 2022 / ICCV 2023","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":8,"starsCount":1292,"forksCount":80,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-10T10:17:01.938Z"}},{"type":"Public","name":"Forget-Me-Not","owner":"SHI-Labs","isFork":false,"description":"Forget-Me-Not: Learning to Forget in Text-to-Image Diffusion Models, 2023","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":97,"forksCount":6,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-17T20:58:45.977Z"}},{"type":"Public","name":"OneFormer-Colab","owner":"SHI-Labs","isFork":false,"description":"[Colab Demo Code] OneFormer: One Transformer to Rule Universal Image 
Segmentation.","topicNames":["transformer","coco","image-segmentation","semantic-segmentation","cityscapes","instance-segmentation","ade20k","panoptic-segmentation","universal-segmentation","oneformer"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":13,"forksCount":8,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-24T20:32:02.017Z"}},{"type":"Public","name":"PAIR-Diffusion","owner":"SHI-Labs","isFork":true,"description":"PAIR-Diffusion: Object-Level Image Editing with Structure-and-Appearance Paired Diffusion Models, 2023","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":23,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-19T20:24:43.350Z"}},{"type":"Public","name":"Text2Video-Zero","owner":"SHI-Labs","isFork":true,"description":"a copy of \"Text-to-Image Diffusion Models are Zero-Shot Video Generators\", ICCV 2023","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":325,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-06T22:35:28.641Z"}},{"type":"Public","name":"Text2Video-Zero-sd-webui","owner":"SHI-Labs","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":6,"starsCount":78,"forksCount":14,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-10T20:45:14.897Z"}},{"type":"Public","name":"SH-GAN","owner":"SHI-Labs","isFork":false,"description":"[WACV 2023] Image Completion with Heterogeneously Filtered Spectral Hints","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":60,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-28T17:37:11.780Z"}},{"type":"Public","name":"Compact-Transformers","owner":"SHI-Labs","isFork":false,"description":"Escaping the Big Data Paradigm with Compact Transformers, 2021 (Train your Vision Transformers in 30 mins on CIFAR-10 with a single GPU!)","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":9,"starsCount":472,"forksCount":74,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-20T21:02:09.188Z"}},{"type":"Public","name":"Boosted-Dynamic-Networks","owner":"SHI-Labs","isFork":false,"description":" Boosted Dynamic Neural Networks, AAAI 2023","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":1,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-12-01T04:45:19.594Z"}},{"type":"Public","name":"VMFormer","owner":"SHI-Labs","isFork":false,"description":"[Preprint] VMFormer: End-to-End Video Matting with 
Transformer","topicNames":["video-matting","vision-transformer"],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":7,"starsCount":103,"forksCount":10,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-11-30T05:09:29.723Z"}},{"type":"Public","name":"Unsupervised-Domain-Adaptation-with-Differential-Treatment","owner":"SHI-Labs","isFork":false,"description":"[CVPR 2020] Differential Treatment for Stuff and Things: A Simple Unsupervised Domain Adaptation Method for Semantic Segmentation","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":5,"starsCount":86,"forksCount":13,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-11-22T05:46:11.637Z"}},{"type":"Public","name":"Convolutional-MLPs","owner":"SHI-Labs","isFork":false,"description":"[Preprint] ConvMLP: Hierarchical Convolutional MLPs for Vision, 2021","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":161,"forksCount":17,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-11T19:20:01.326Z"}},{"type":"Public","name":"LIVE-Layerwise-Image-Vectorization","owner":"SHI-Labs","isFork":true,"description":"[CVPR 2022 Oral] Towards Layer-wise Image Vectorization","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":48,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-06-10T00:59:36.971Z"}},{"type":"Public","name":"VideoINR-Continuous-Space-Time-Super-Resolution","owner":"SHI-Labs","isFork":true,"description":"[CVPR 2022] VideoINR: Learning Video Implicit Neural Representation for Continuous Space-Time Super-Resolution","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":26,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-06-09T16:16:22.962Z"}},{"type":"Public","name":"SinNeRF","owner":"SHI-Labs","isFork":true,"description":"\"SinNeRF: Training Neural Radiance Fields on Complex Scenes from a Single Image\", Dejia Xu, Yifan Jiang, Peihao Wang, Zhiwen Fan, Humphrey Shi, Zhangyang Wang","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":26,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-05-03T18:40:23.591Z"}},{"type":"Public","name":"micromotion-styleGAN","owner":"SHI-Labs","isFork":true,"description":"","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":12,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-05-01T23:35:55.674Z"}},{"type":"Public","name":"SeMask-Segmentation","owner":"SHI-Labs","isFork":true,"description":"[Preprint] SeMask: Semantically Masked Transformers for Semantic 
Segmentation.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":35,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-04-13T08:46:02.926Z"}},{"type":"Public","name":"AdaFocusV2","owner":"SHI-Labs","isFork":true,"description":"[CVPR 2022] AdaFocus V2: End-to-End Training of Spatial Dynamic Networks for Video Recognition","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":13,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-04-12T02:45:49.088Z"}},{"type":"Public","name":"Pseudo-IoU-for-Anchor-Free-Object-Detection","owner":"SHI-Labs","isFork":false,"description":"Pseudo-IoU: Improving Label Assignment in Anchor-Free Object Detection","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":30,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-07-23T21:07:55.138Z"}},{"type":"Public","name":"Cross-Scale-Non-Local-Attention","owner":"SHI-Labs","isFork":false,"description":"PyTorch code for our paper \"Image Super-Resolution with Cross-Scale Non-Local Attention and Exhaustive Self-Exemplars Mining\" (CVPR2020).","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":24,"starsCount":400,"forksCount":46,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-03-04T06:12:36.054Z"}},{"type":"Public","name":"CCNet","owner":"SHI-Labs","isFork":true,"description":"A copy of our PyTorch code for CCNet: Criss-Cross Attention for Semantic Segmentation (TPAMI 2020 & ICCV 2019). Please follow the repo at speedinghzl/CCNet for updates.","topicNames":[],"topicsNotShown":0,"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":278,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2020-07-10T15:56:00.713Z"}}],"repositoryCount":38,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"mirror","text":"Mirrors"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}