{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"safe-reward","owner":"ai-fail-safe","isFork":false,"description":"a prototype for an AI safety library that allows an agent to maximize its reward by solving a puzzle in order to prevent the worst-case outcomes of perverse instantiation ","allTopics":["failsafe","ai-safety","anomaly-detection","ai-alignment","fail-safe"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":8,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-11-08T02:03:16.295Z"}},{"type":"Public","name":"honeypot","owner":"ai-fail-safe","isFork":false,"description":"a project to detect environment tampering on the part of an agent","allTopics":["failsafe","ai-safety","anomaly-detection","ai-alignment","fail-safe"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-31T00:38:24.861Z"}},{"type":"Public","name":"mulligan","owner":"ai-fail-safe","isFork":false,"description":"a library designed to shut down an agent exhibiting unexpected behavior providing a potential \"mulligan\" to human civilization; IN CASE OF FAILURE, DO NOT JUST REMOVE THIS CONSTRAINT AND START IT BACK UP AGAIN","allTopics":["failsafe","ai-safety","anomaly-detection","ai-alignment","fail-safe"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-30T21:35:49.598Z"}},{"type":"Public","name":"gene-drive","owner":"ai-fail-safe","isFork":false,"description":"a project to ensure that all child processes created by an agent \"inherit\" the agent's safety controls","allTopics":["failsafe","ai-safety","ai-alignment","fail-safe"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-29T21:46:09.266Z"}},{"type":"Public","name":"life-span","owner":"ai-fail-safe","isFork":false,"description":"a project to ensure an artificial agent will eventually reach the end of its existence","allTopics":["failsafe","ai-safety","ai-alignment","fail-safe"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-29T21:42:40.135Z"}}],"repositoryCount":5,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"ai-fail-safe repositories"}