{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"reds","owner":"ruoxi-jia-group","isFork":false,"description":"Homepage portfolio of Reds Projects","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-18T23:02:06.418Z"}},{"type":"Public","name":"WokeyTalky","owner":"ruoxi-jia-group","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-18T19:20:49.492Z"}},{"type":"Public","name":"Woke-Pipeline","owner":"ruoxi-jia-group","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-14T21:28:52.429Z"}},{"type":"Public","name":"LAVA","owner":"ruoxi-jia-group","isFork":false,"description":"This is an official repository for \"LAVA: Data Valuation without Pre-Specified Learning Algorithms\" (ICLR2023). ","allTopics":["efficient","ot","optimal-transport","model-agnostic","data-valuation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":2,"starsCount":37,"forksCount":7,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-05T06:31:22.645Z"}},{"type":"Public","name":"Nash-Meta-Learning","owner":"ruoxi-jia-group","isFork":false,"description":"Official implementation of \"Fairness-Aware Meta-Learning via Nash Bargaining.\" We explore hypergradient conflicts in one-stage meta-learning and their impact on fairness. Our two-stage approach uses Nash bargaining to mitigate conflicts, enhancing fairness and model performance simultaneously.","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-15T23:54:54.651Z"}},{"type":"Public","name":"dataselection","owner":"ruoxi-jia-group","isFork":false,"description":"Projektor Website","allTopics":[],"primaryLanguage":{"name":"JavaScript","color":"#f1e05a"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-14T21:43:52.964Z"}},{"type":"Public","name":"projektor","owner":"ruoxi-jia-group","isFork":false,"description":"This is an official repository for \"Performance Scaling via Optimal Transport: Enabling Data Selection from Partially Revealed Sources\" (NeurIPS 2023). ","allTopics":["projection","performance-prediction","data-selection","scaling-law"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":1,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-26T22:34:48.560Z"}},{"type":"Public","name":"privmon","owner":"ruoxi-jia-group","isFork":false,"description":"This is an official repository for PrivMon: A Stream-Based System for Real-Time Privacy Attack Detection for Machine Learning Models (RAID 2023)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-10-16T08:06:57.000Z"}},{"type":"Public","name":"CLIP-MIA","owner":"ruoxi-jia-group","isFork":false,"description":"This is an official repository for Practical Membership Inference Attacks Against Large-Scale Multi-Modal Models: A Pilot Study (ICCV2023).","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":1,"starsCount":15,"forksCount":2,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-09-29T12:32:09.376Z"}},{"type":"Public","name":"2d-shapley","owner":"ruoxi-jia-group","isFork":false,"description":"This is an official repository for \"2D-Shapley: A Framework for Fragmented Data Valuation\" (ICML2023). ","allTopics":["shapley","data-valuation","2d-shapley"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":1,"starsCount":3,"forksCount":1,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-27T18:44:21.815Z"}},{"type":"Public","name":"Trojan_Removal_Benchmark","owner":"ruoxi-jia-group","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-03T02:53:21.304Z"}},{"type":"Public","name":"ASSET","owner":"ruoxi-jia-group","isFork":false,"description":"This repository is the official implementation of the paper \"ASSET: Robust Backdoor Data Detection Across a Multiplicity of Deep Learning Paradigms.\" ASSET achieves state-of-the-art reliability in detecting poisoned samples in end-to-end supervised learning/ self-supervised learning/ transfer learning.","allTopics":["ai","backdoor","transfer-learning","self-supervised-learning","backdoor-attacks","backdoor-defense","aisecurity","backdoor-detection"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":14,"forksCount":0,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-06-07T23:43:04.855Z"}},{"type":"Public","name":"Narcissus","owner":"ruoxi-jia-group","isFork":false,"description":"The official implementation of the CCS'23 paper, Narcissus clean-label backdoor attack -- only takes THREE images to poison a face recognition dataset in a clean-label way and achieves a 99.89% attack success rate.","allTopics":["adversarial-machine-learning","adversarial-attacks","ai-security","backdoor-attacks","deep-","poisoning-attacks"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":96,"forksCount":10,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-09T06:17:24.542Z"}},{"type":"Public","name":"Meta-Sift","owner":"ruoxi-jia-group","isFork":false,"description":"The official implementation of USENIX Security'23 paper \"Meta-Sift\" -- Ten minutes or less to find a 1000-size or larger clean subset on poisoned dataset.","allTopics":["ai-security","backdoor-attacks","data-poisoning","dataset-security"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":15,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-04-27T23:00:52.478Z"}},{"type":"Public","name":"Universal_Pert_Cert","owner":"ruoxi-jia-group","isFork":false,"description":"This repo is the official implementation of the ICLR'23 paper \"Towards Robustness Certification Against Universal Perturbations.\" We calculate the certified robustness against universal perturbations (UAP/ Backdoor) given a trained model.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":12,"forksCount":2,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-14T23:24:03.341Z"}},{"type":"Public","name":"I-BAU","owner":"ruoxi-jia-group","isFork":true,"description":"Official Implementation of the ICLR 2022 paper, ``Adversarial Unlearning of Backdoors via Implicit Hypergradient''","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":13,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-04-24T21:28:32.509Z"}},{"type":"Public","name":"frequency-backdoor","owner":"ruoxi-jia-group","isFork":true,"description":"The official implementation of the ICCV 2021 paper, \"Rethinking the backdoor attacks' triggers: A frequency perspective.\"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":5,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-11-30T21:24:50.035Z"}},{"type":"Public","name":"Knowledge-Enriched-DMI","owner":"ruoxi-jia-group","isFork":true,"description":"The official implementation of the ICCV 2021 paper, \"Knowledge-Enriched Distributional Model Inversion Attacks.\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":11,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-11-06T01:38:45.617Z"}}],"repositoryCount":18,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}