{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"honest_benchmark","owner":"2501-ai","isFork":false,"description":"An honest benchmark for AI agents running 2501","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-15T09:10:26.274Z"}},{"type":"Public","name":"2501-humaneval-tests","owner":"2501-ai","isFork":false,"description":"This repository contains tests for evaluating code completions using the HumanEval dataset with 2501.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-01T01:00:34.340Z"}}],"repositoryCount":2,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"2501-ai repositories"}