{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"TeaMs-RL","owner":"SafeRL-Lab","isFork":false,"description":"TeaMs-RL: Teaching LLMs to Teach Themselves Better Instructions via Reinforcement Learning","allTopics":["reinforcement-learning","efficiency","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,12,0,1,0,0,0,0,0,0,3],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-06T22:21:06.131Z"}},{"type":"Public","name":"Safe-Multi-Objective-MuJoCo","owner":"SafeRL-Lab","isFork":false,"description":"Safe Multi-Objective MuJoCo Benchmark.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":1,"license":null,"participation":[0,0,0,3,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-06T00:54:37.924Z"}},{"type":"Public","name":"Safety-MuJoCo","owner":"SafeRL-Lab","isFork":false,"description":"Safety-MuJoCo Environments","allTopics":["safe-rl","safety-control","primal-based-safe-rl"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T15:04:55.768Z"}},{"type":"Public","name":"Massive-MARL-Benchmark","owner":"SafeRL-Lab","isFork":false,"description":"Massive Multi-Agent Reinforcement Learning Benchmark.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-03T04:58:38.666Z"}},{"type":"Public","name":"Safe-MARL-in-Autonomous-Driving","owner":"SafeRL-Lab","isFork":false,"description":"Safe Multi-Agent Reinforcement Learning to Make decisions in Autonomous Driving.","allTopics":["reinforcement-learning","autonomous-driving","multi-agent-reinforcement-learning","safe-reinforcement-learning"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":13,"forksCount":4,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-01T04:19:04.181Z"}},{"type":"Public","name":"Robust-RL-Baselines","owner":"SafeRL-Lab","isFork":false,"description":"Robust Reinforcement Learning Benchmark","allTopics":["benchmark","reinforcement-learning","baseline","robustness"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-29T04:54:40.048Z"}},{"type":"Public","name":"LLM-RL-Robotics-Papers","owner":"SafeRL-Lab","isFork":false,"description":"Large Language Models and Robotics.","allTopics":["reinforcement-learning","robotics","large-language-models"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":15,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-27T21:41:57.176Z"}},{"type":"Public","name":"Safe-Policy-Optimization-Parallel-Version","owner":"SafeRL-Lab","isFork":true,"description":"This is a benchmark repository for safe reinforcement learning algorithms","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":44,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-26T16:57:55.724Z"}},{"type":"Public","name":"Trusted-RL-for-Autonomous-Driving","owner":"SafeRL-Lab","isFork":false,"description":"Trusted reinforcement learning algorithms for autonomous driving.","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-16T09:54:08.129Z"}},{"type":"Public","name":"Safe-RL-Workshop-Seminar","owner":"SafeRL-Lab","isFork":false,"description":"Safe Reinforcement Learning Workshops and Seminars.","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-16T09:45:30.213Z"}},{"type":"Public","name":"Uncertainty-in-RL","owner":"SafeRL-Lab","isFork":false,"description":"The repository is for Reinforcement-Learning Uncertainty research, in which we investigate various uncertain factors in RL.","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":16,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-16T08:03:10.642Z"}},{"type":"Public","name":"Safe-Policy-Optimization","owner":"SafeRL-Lab","isFork":true,"description":"This is a benchmark repository for safe reinforcement learning algorithms","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":44,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-08T02:34:22.722Z"}},{"type":"Public","name":"RL-Plot","owner":"SafeRL-Lab","isFork":false,"description":"Plot RL performance figures.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-03T12:07:05.619Z"}},{"type":"Public","name":"Safe-Multi-Agent-Isaac-Gym","owner":"SafeRL-Lab","isFork":true,"description":"Safe Multi-Agent Isaac Gym benchmark for safe multi-agent reinforcement learning research.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":6,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-11-14T20:50:51.859Z"}},{"type":"Public","name":"Multi-Agent-Constrained-Policy-Optimisation","owner":"SafeRL-Lab","isFork":true,"description":"Multi-Agent Constrained Policy Optimisation (MACPO; MAPPO-L).","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":21,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-11-14T20:12:06.761Z"}},{"type":"Public","name":"Safe-Multi-Agent-Mujoco","owner":"SafeRL-Lab","isFork":true,"description":"Safe Multi-Agent MuJoCo benchmark for safe multi-agent reinforcement learning research.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":7,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-09-06T13:43:33.113Z"}},{"type":"Public","name":"Safe-Reinforcement-Learning-Baselines","owner":"SafeRL-Lab","isFork":true,"description":"The repository is for safe reinforcement learning baselines.","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":75,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-09-06T08:11:02.394Z"}},{"type":"Public","name":"Safe-Multi-Agent-Robosuite","owner":"SafeRL-Lab","isFork":true,"description":"Safe Multi-Agent Robosuite benchmark for safe multi-agent reinforcement learning research.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":4,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-08-21T16:31:42.628Z"}},{"type":"Public","name":"coordination","owner":"SafeRL-Lab","isFork":true,"description":"Learning to Coordinate Manipulation Skills via Skill Behavior Diversification (ICLR 2020)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":9,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-06-22T01:51:11.511Z"}}],"repositoryCount":19,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"SafeRL-Lab repositories"}