{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"FasterTransformer4CodeFuse","owner":"codefuse-ai","isFork":false,"description":"High-performance LLM inference based on our optimized version of FastTransfomer","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":123,"forksCount":9,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-14T08:38:26.663Z"}}],"repositoryCount":1,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}