{"payload":{"header_redesign_enabled":false,"results":[{"id":"278810244","archived":false,"color":"#3572A5","followers":4177,"has_funding_file":false,"hl_name":"open-mmlab/mmaction2","hl_trunc_description":"OpenMMLab's Next Generation Video Understanding Toolbox and Benchmark","language":"Python","mirror":false,"owned_by_organization":true,"public":true,"repo":{"repository":{"id":278810244,"name":"mmaction2","owner_id":10245193,"owner_login":"open-mmlab","updated_at":"2024-08-14T07:45:53.829Z","has_issues":true}},"sponsorable":false,"topics":["benchmark","deep-learning","pytorch","ava","x3d","action-recognition","video-understanding","video-classification","tsm","non-local","i3d","tsn","slowfast","temporal-action-localization","spatial-temporal-action-detection","openmmlab","posec3d","uniformerv2"],"type":"Public","help_wanted_issues_count":5,"good_first_issue_issues_count":2,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":56,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Aopen-mmlab%252Fmmaction2%2B%2Blanguage%253APython","metadata":null,"warn_limited_results":false,"csrf_tokens":{"/open-mmlab/mmaction2/star":{"post":"NNF0bQr5-hQIvVPHx-vFdctu9jA46JiXmfXjlU2vY0Z2XsiwG4QknZ17ZhhrOaOFmypUnAav340sEPc4EovyQw"},"/open-mmlab/mmaction2/unstar":{"post":"Y3LOrBJ3N_xHJYU9LWqwt2bWwQW8Um2I8BoRx5F6od9aVk-BWP2kw-4udAttL31hWP9rr-_dzLc419Hkc4DYMA"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"w5SequBupQSGuq4NeA4ZNJyEQwIlTutsX2ZoEzG7sUTXm-t6_tzkWQc87zw6Sk3rzgMF0SDdygknYpQ4x3nEOA"}}},"title":"Repository search results"}