{"payload":{"header_redesign_enabled":false,"results":[{"id":"510169491","archived":false,"color":"#3572A5","followers":286,"has_funding_file":false,"hl_name":"prismformore/Multi-Task-Transformer","hl_trunc_description":"Code of ICLR2023 paper \"TaskPrompter: Spatial-Channel Multi-Task Prompting for Dense Scene Understanding\" and ECCV2022 paper \"Inverted Py…","language":"Python","mirror":false,"owned_by_organization":false,"public":true,"repo":{"repository":{"id":510169491,"name":"Multi-Task-Transformer","owner_id":14089338,"owner_login":"prismformore","updated_at":"2024-04-24T04:30:12.439Z","has_issues":true}},"sponsorable":false,"topics":["pascal","computer-vision","deep-learning","segmentation","human-parsing","depth-estimation","cityscapes","multi-task-learning","scene-understanding","nyudv2","eccv2022","cityscapes-3d"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":0,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":99,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Aprismformore%252FMulti-Task-Transformer%2B%2Blanguage%253APython","metadata":null,"csrf_tokens":{"/prismformore/Multi-Task-Transformer/star":{"post":"2Zd6YfqaFjqmk4pmmKGcMsrwkuDUNb2W-2O7V6Zw-WS2stxVqhZa9zF9yHEwEvAInbNs_e1W_75UMw4rtRr0VQ"},"/prismformore/Multi-Task-Transformer/unstar":{"post":"88EZ_Gtl2a1TFCVcxid9VHyQTdb6EfPSjCMYVJ8VycmCFKPm26UgdtFfR3yqsEoPRA-L6YP9EGctqCDEDaXgHw"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"4DuidMma0UV1_3SmfEaud6Ic87LNeLMWSAkknrKgVjRwta8oMcCh6aOm7ahiGnH30puJ3t_Vsj3Z-r5sU6kK_w"}}},"title":"Repository search results"}