{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"ChunkLlama","owner":"HKUNLP","isFork":false,"description":"[ICML'24] Data and code for our paper \"Training-Free Long-Context Scaling of Large Language Models\"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":5,"starsCount":229,"forksCount":12,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-09T06:50:11.103Z"}},{"type":"Public","name":"GIMLET","owner":"HKUNLP","isFork":true,"description":"The code for GIMLET: A Unified Graph-Text Model for Instruction-Based Molecule Zero-Shot Learning","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":2,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-16T13:18:02.164Z"}},{"type":"Public","name":"diffusion-of-thoughts","owner":"HKUNLP","isFork":false,"description":"Code for the paper \"Diffusion of Thoughts: Chain-of-Thought Reasoning in Diffusion Language Models\"","topicNames":["machine-learning","natural-language-processing","text-generation","pytorch","diffusion-models","non-autoregressive","mathematical-reasoning","chain-of-thought-reasoning","diffusion-lm"],"topicsNotShown":0,"allTopics":["machine-learning","natural-language-processing","text-generation","pytorch","diffusion-models","non-autoregressive","mathematical-reasoning","chain-of-thought-reasoning","diffusion-lm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":48,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-24T05:43:10.981Z"}},{"type":"Public","name":"RSA","owner":"HKUNLP","isFork":true,"description":"Retrieved Sequence Augmentation for Protein Representation Learning","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":39,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-01T13:46:27.461Z"}},{"type":"Public","name":"hkunlp.github.io","owner":"HKUNLP","isFork":false,"description":"Website for HKU NLP group (under construction)","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":4,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-26T01:34:55.356Z"}},{"type":"Public","name":"SymGen","owner":"HKUNLP","isFork":false,"description":"[EMNLP'23] Code for Generating Data for Symbolic Language with Large Language Models","topicNames":["python","bash","sql","prolog","data-generation","codex","mtop","symbolic-language"],"topicsNotShown":0,"allTopics":["python","bash","sql","prolog","data-generation","codex","mtop","symbolic-language"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":16,"forksCount":1,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-21T04:33:02.445Z"}},{"type":"Public","name":"multilingual-transfer","owner":"HKUNLP","isFork":false,"description":"Code 
for paper ”Language Versatilists vs. Specialists: An Empirical Revisiting on Multilingual Transfer Ability“","topicNames":["multilingual","bloom","transfer","llama","reasoning","llms"],"topicsNotShown":0,"allTopics":["multilingual","bloom","transfer","llama","reasoning","llms"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":12,"forksCount":2,"license":null,"participation":[0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-13T06:17:53.958Z"}},{"type":"Public","name":"subgoal-theorem-prover","owner":"HKUNLP","isFork":false,"description":"Code for the paper \"Decomposing the Enigma: Subgoal-based Demonstration Learning for Formal Theorem Proving\"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":1,"starsCount":17,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-25T10:26:58.802Z"}},{"type":"Public","name":"icl-ceil","owner":"HKUNLP","isFork":false,"description":"[ICML 2023] Code for our paper “Compositional Exemplars for In-context Learning”.","topicNames":["retrieval","metric-learning","determinantal-point-processes","compositionality","in-context-learning","large-language-models"],"topicsNotShown":0,"allTopics":["retrieval","metric-learning","determinantal-point-processes","compositionality","in-context-learning","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":88,"forksCount":9,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-15T08:48:48.792Z"}},{"type":"Public","name":"UnifiedSKGsite","owner":"HKUNLP","isFork":true,"description":"A Portal Site for Structured Knowledge Grounding(SKG) Resources.","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"SCSS","color":"#c6538c"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":6,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-07T12:57:19.421Z"}},{"type":"Public","name":"efficient-attention","owner":"HKUNLP","isFork":false,"description":"[EVA ICLR'23; LARA ICML'22] Efficient attention mechanisms via control variates, random features, and importance sampling","topicNames":["machine-learning","machine-translation","python3","language-model","attention-mechanism","vision-transformer","pytorch"],"topicsNotShown":0,"allTopics":["machine-learning","machine-translation","python3","language-model","attention-mechanism","vision-transformer","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":76,"forksCount":3,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-07T08:47:28.627Z"}},{"type":"Public","name":"SunGen","owner":"HKUNLP","isFork":true,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-26T06:39:29.596Z"}},{"type":"Public","name":"HumanPrompt","owner":"HKUNLP","isFork":false,"description":"A framework for human-readable prompt-based method with large language models. 
Specially designed for researchers. (Deprecated, check out LangChain for better usage!)","topicNames":["natural-language-processing","large-language-models","prompt-engineering"],"topicsNotShown":0,"allTopics":["natural-language-processing","large-language-models","prompt-engineering"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":125,"forksCount":8,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-25T10:24:05.496Z"}},{"type":"Public","name":"reparam-discrete-diffusion","owner":"HKUNLP","isFork":false,"description":"Reparameterized Discrete Diffusion Models for Text Generation","topicNames":["machine-learning","natural-language-processing","text-generation","python3","language-model","fairseq","diffusion-models","non-autoregressive","pytorch"],"topicsNotShown":0,"allTopics":["machine-learning","natural-language-processing","text-generation","python3","language-model","fairseq","diffusion-models","non-autoregressive","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":87,"forksCount":2,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-14T01:44:44.667Z"}},{"type":"Public","name":"ProGen","owner":"HKUNLP","isFork":false,"description":"[EMNLP-2022 Findings] Code for paper “ProGen: Progressive Zero-shot Dataset Generation via In-context Feedback”.","topicNames":["data-generation","zero-shot-learning","influence-functions","in-context-learning","large-language-models"],"topicsNotShown":0,"allTopics":["data-generation","zero-shot-learning","influence-functions","in-context-learning","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":20,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-04T16:05:59.664Z"}},{"type":"Public","name":".github","owner":"HKUNLP","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-04-20T18:00:26.383Z"}},{"type":"Public","name":"ZeroGen","owner":"HKUNLP","isFork":true,"description":"[EMNLP 2022] Code for our paper “ZeroGen: Efficient Zero-shot Learning via Dataset Generation”.","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":15,"forksCount":10,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-02-18T08:30:35.047Z"}}],"repositoryCount":17,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}
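
The listing above was recovered from the JSON payload that backs GitHub's repositories tab; each entry in payload["repositories"] carries the fields shown here (name, description, primaryLanguage, starsCount, forksCount, license, lastUpdated). As a minimal sketch of working with that payload directly, assuming the raw JSON has been saved locally as repos.json (a hypothetical filename), the snippet below prints the repositories sorted by star count using only the standard library:

```python
import json

# Load a local copy of the scraped GitHub payload shown above
# ("repos.json" is a hypothetical filename for that saved JSON).
with open("repos.json", encoding="utf-8") as f:
    payload = json.load(f)["payload"]

# Each repository entry carries the fields rendered in the listing:
# name, description, primaryLanguage, starsCount, forksCount, license,
# and lastUpdated.timestamp. primaryLanguage can be null (e.g. .github).
for repo in sorted(payload["repositories"],
                   key=lambda r: r["starsCount"], reverse=True):
    language = (repo.get("primaryLanguage") or {}).get("name", "n/a")
    updated = repo["lastUpdated"]["timestamp"][:10]  # keep only the date part
    print(f'{repo["name"]:<28} {repo["starsCount"]:>4} stars  '
          f'{language:<12} updated {updated}')
```

The same pattern extracts the other keys present in the payload (description, allTopics, license) if a fuller report is needed.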