{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"ragas","owner":"explodinggradients","isFork":false,"description":"Evaluation framework for your Retrieval Augmented Generation (RAG) pipelines","allTopics":["llm","llmops"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":46,"issueCount":212,"starsCount":6702,"forksCount":660,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-26T11:33:52.866Z"}},{"type":"Public","name":"notes","owner":"explodinggradients","isFork":false,"description":"Research notes and extra resources for all the work at explodinggradients.com","allTopics":["research","notes","opensourceai","llms"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":18,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-23T21:22:22.358Z"}},{"type":"Public","name":"LLaMA-Factory","owner":"explodinggradients","isFork":true,"description":"Unify Efficient Fine-tuning of 100+ LLMs","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":3877,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-03T22:07:29.907Z"}},{"type":"Public","name":".github","owner":"explodinggradients","isFork":false,"description":"","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-24T19:21:40.927Z"}},{"type":"Public","name":"Funtuner","owner":"explodinggradients","isFork":false,"description":"Supervised instruction finetuning for LLM with HF trainer and Deepspeed","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":32,"forksCount":4,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-06T13:18:16.591Z"}},{"type":"Public","name":"nemesis","owner":"explodinggradients","isFork":false,"description":"Reward Model framework for LLM RLHF","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":57,"forksCount":3,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-07T14:59:52.505Z"}}],"repositoryCount":6,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"explodinggradients repositories"}