{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"unsloth","owner":"unslothai","isFork":false,"description":"Finetune Llama 3, Mistral, Phi & Gemma LLMs 2-5x faster with 80% less memory","allTopics":["ai","llama","lora","gemma","mistral","fine-tuning","finetuning","llms","qlora","llama3","phi3"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":12,"issueCount":380,"starsCount":12893,"forksCount":839,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,61,2,11,4,8,13,11,14,7,4,8,2,1,3,5,7,8,2,9,6,11,1,5,2,6,8,3,8,9,9,0,4,11,37],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-20T21:35:07.575Z"}},{"type":"Public","name":"studio","owner":"unslothai","isFork":false,"description":"Unsloth Studio","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":1,"license":"GNU Affero General Public License v3.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-19T16:20:40.818Z"}},{"type":"Public","name":"llama.cpp","owner":"unslothai","isFork":true,"description":"LLM inference in C/C++","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":8888,"license":"MIT License","participation":[48,25,25,29,76,71,55,41,20,29,46,38,24,34,43,22,32,27,36,20,24,51,45,37,89,68,53,72,61,58,77,68,62,65,69,64,39,46,35,51,39,69,69,69,68,45,43,49,62,60,50,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-12T05:21:03.479Z"}},{"type":"Public","name":"gemma_pytorch","owner":"unslothai","isFork":true,"description":"The official PyTorch implementation of Google's Gemma models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":491,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,3,4,5,7,0,3,1,3,0,0,0,0,0,3,4,0,0,7,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-29T20:49:34.775Z"}},{"type":"Public","name":"hyperlearn","owner":"unslothai","isFork":false,"description":"2-2000x faster ML algos, 50% less memory usage, works on all hardware - new and old.","allTopics":["python","statistics","research","neural-network","optimization","scikit-learn","pytorch","econometrics","data-analysis","tensor","regression-models","statsmodels","data-science","machine-learning","deep-learning","gpu"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":1,"issueCount":1,"starsCount":1692,"forksCount":120,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-27T16:47:59.133Z"}},{"type":"Public","name":"transformers","owner":"unslothai","isFork":true,"description":"🤗 Transformers: State-of-the-art Machine Learning for Pytorch, TensorFlow, and JAX.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":25736,"license":"Apache License 2.0","participation":[79,48,67,66,71,62,62,50,48,41,68,57,52,64,82,61,69,45,38,74,75,50,3,20,54,53,49,55,41,52,48,57,67,73,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-19T11:31:59.295Z"}}],"repositoryCount":6,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"unslothai repositories"}