{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"LLM_Explainer","owner":"AI4LIFE-GROUP","isFork":false,"description":"Code for paper: Are Large Language Models Post Hoc Explainers?","allTopics":["interpretability","xai","explainability","large-language-models","llm"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":20,"forksCount":3,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-07T21:05:01.150Z"}},{"type":"Public","name":"SpLiCE","owner":"AI4LIFE-GROUP","isFork":false,"description":"Sparse Linear Concept Embeddings","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":34,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-06T18:09:52.594Z"}},{"type":"Public","name":"average-case-robustness","owner":"AI4LIFE-GROUP","isFork":false,"description":"Characterizing Data Point Vulnerability via Average-Case Robustness, UAI 2024","allTopics":["robustness","adversarial-robustness","robustness-verification","randomized-smoothing","multivariate-normal-distribution"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-07T02:25:16.966Z"}},{"type":"Public","name":"disagreement-problem","owner":"AI4LIFE-GROUP","isFork":false,"description":"Code repo for the disagreement problem paper","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-16T03:36:38.993Z"}},{"type":"Public","name":"OpenXAI","owner":"AI4LIFE-GROUP","isFork":false,"description":"OpenXAI : Towards a Transparent Evaluation of Model Explanations","allTopics":["leaderboard","reproducibility","interpretability","explainable-ai","explainability","benchmark"],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":1,"issueCount":5,"starsCount":218,"forksCount":33,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-31T01:27:03.794Z"}},{"type":"Public","name":"fair-unlearning","owner":"AI4LIFE-GROUP","isFork":false,"description":"Fair Machine Unlearning: Data Removal while Mitigating Disparities","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-15T06:43:32.097Z"}},{"type":"Public","name":"DiET","owner":"AI4LIFE-GROUP","isFork":false,"description":"Code for \"Discriminative Feature Attributions via Distractor Erasure 
Tuning\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-12T21:49:12.478Z"}},{"type":"Public","name":"amplify","owner":"AI4LIFE-GROUP","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-27T02:40:11.179Z"}},{"type":"Public","name":"Balanced_Recourse","owner":"AI4LIFE-GROUP","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-07T21:50:27.909Z"}},{"type":"Public","name":"lcnn","owner":"AI4LIFE-GROUP","isFork":false,"description":"Low Curvature Neural Networks (NeurIPS 2022)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-06T22:41:26.360Z"}},{"type":"Public","name":"rocerf_code","owner":"AI4LIFE-GROUP","isFork":false,"description":"Source code for ROCERF","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-22T17:05:06.405Z"}},{"type":"Public","name":"ProbabilisticallyRobustRecourse","owner":"AI4LIFE-GROUP","isFork":false,"description":"\"Probabilistically Robust Recourse: Navigating the Trade-offs between Costs and Robustness\". M. Pawelczyk, T. Datta, J. v.d Heuvel, G. Kasneci, H. Lakkaraju. International Conference on Learning Representations 2023 (ICLR).","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-19T07:33:57.246Z"}},{"type":"Public","name":"CounterfactualDistanceAttack","owner":"AI4LIFE-GROUP","isFork":false,"description":"\"On the Privacy Risks of Algorithmic Recourse\". Martin Pawelczyk, Himabindu Lakkaraju* and Seth Neel*. In International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR, 2023.","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-19T07:27:14.570Z"}},{"type":"Public","name":"In-Context-Unlearning","owner":"AI4LIFE-GROUP","isFork":false,"description":"\"In-Context Unlearning: Language Models as Few Shot Unlearners\". 
Martin Pawelczyk, Seth Neel* and Himabindu Lakkaraju*; arXiv preprint: arXiv:2310.07579; 2023.","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-19T07:11:05.940Z"}},{"type":"Public","name":"robust-grads","owner":"AI4LIFE-GROUP","isFork":false,"description":"Code for https://arxiv.org/abs/2306.06716","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-22T21:07:28.816Z"}},{"type":"Public","name":"UAI22_DataPoisoningAttacksonOff-PolicyPolicyEvaluationMethods_RL","owner":"AI4LIFE-GROUP","isFork":false,"description":"DOPE: Data Poisoning Attacks on Off-Policy Policy Evaluation Methods","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-09T03:02:31.935Z"}},{"type":"Public","name":"GraphXAI","owner":"AI4LIFE-GROUP","isFork":true,"description":"GraphXAI: Resource to support the development and evaluation of GNN explainers","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":26,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-18T18:26:11.735Z"}},{"type":"Public","name":"lfa","owner":"AI4LIFE-GROUP","isFork":false,"description":"Local function approximation (LFA) framework, NeurIPS 2022","allTopics":["function-approximation","interpretability","explainable-ai","explainable-ml","explainability","faithful-explanation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-06T21:01:51.952Z"}},{"type":"Public","name":"arxiv-latex-cleaner","owner":"AI4LIFE-GROUP","isFork":true,"description":"arXiv LaTeX Cleaner: Easily clean the LaTeX code of your paper to submit to arXiv","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":314,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-09-21T07:50:55.360Z"}},{"type":"Public","name":"ROAR","owner":"AI4LIFE-GROUP","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-01-26T14:17:46.888Z"}},{"type":"Public","name":"unified_representation","owner":"AI4LIFE-GROUP","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-12-17T04:48:27.222Z"}},{"type":"Public","name":"fair_ranking_effectiveness_on_outcomes","owner":"AI4LIFE-GROUP","isFork":false,"description":"AIES 2021 Paper: Does Fair Ranking Imporve Minority Outcomes?","allTopics":[],"primaryLanguage":{"name":"Jupyter 
Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-12-05T20:35:39.755Z"}},{"type":"Public","name":"rise-against-distribution-shift","owner":"AI4LIFE-GROUP","isFork":false,"description":"Code base for robust learning for an intersection of causal and adversarial shifts","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-11-25T17:36:30.473Z"}},{"type":"Public","name":"nifty","owner":"AI4LIFE-GROUP","isFork":true,"description":"Code for paper https://arxiv.org/abs/2102.13186","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":12,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-04-03T00:42:58.485Z"}}],"repositoryCount":24,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}