{"payload":{"pageCount":4,"repositories":[{"type":"Public","name":"warp","owner":"NVIDIA","isFork":false,"description":"A Python framework for high performance GPU simulation and graphics","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":36,"starsCount":1770,"forksCount":150,"license":"Other","participation":[26,57,38,10,17,41,9,5,32,24,20,41,7,4,18,29,14,28,32,63,27,32,23,1,26,23,24,0,2,19,39,29,12,6,10,21,48,50,45,40,14,9,14,20,37,35,40,26,21,35,33,32],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T19:18:48.828Z"}},{"type":"Public","name":"NeMo","owner":"NVIDIA","isFork":false,"description":"A scalable generative AI framework built for researchers and developers working on Large Language Models, Multimodal, and Speech AI (Automatic Speech Recognition and Text-to-Speech)","allTopics":["machine-translation","tts","speech-synthesis","neural-networks","deeplearning","speaker-recognition","asr","multimodal","speech-translation","large-language-models","speaker-diariazation","generative-ai"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":82,"issueCount":61,"starsCount":10447,"forksCount":2228,"license":"Apache License 2.0","participation":[12,22,10,17,21,11,17,14,22,23,17,21,23,16,21,31,25,13,24,6,12,4,13,4,6,20,14,4,8,15,17,28,13,13,21,22,39,24,20,41,19,13,34,30,27,28,38,28,30,20,28,40],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T19:23:02.487Z"}},{"type":"Public","name":"modulus","owner":"NVIDIA","isFork":false,"description":"Open-source deep-learning framework for building, training, and fine-tuning deep learning models using state-of-the-art Physics-ML methods","allTopics":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":16,"issueCount":96,"starsCount":751,"forksCount":157,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T18:57:19.759Z"}},{"type":"Public","name":"NeMo-Curator","owner":"NVIDIA","isFork":false,"description":"Scalable toolkit for data curation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":12,"issueCount":34,"starsCount":311,"forksCount":31,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,3,2,0,6,1,6,3,4,2,5,2],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T18:43:59.069Z"}},{"type":"Public","name":"TransformerEngine","owner":"NVIDIA","isFork":false,"description":"A library for accelerating Transformer models on NVIDIA GPUs, including using 8-bit floating point (FP8) precision on Hopper and Ada GPUs, to provide better performance with lower memory utilization in both training and inference.","allTopics":["python","machine-learning","deep-learning","gpu","cuda","pytorch","jax","fp8"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":28,"issueCount":101,"starsCount":1544,"forksCount":241,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T19:13:28.229Z"}},{"type":"Public","name":"NeMo-Framework-Launcher","owner":"NVIDIA","isFork":false,"description":"NeMo Megatron launcher and tools","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":10,"issueCount":23,"starsCount":408,"forksCount":120,"license":"Apache License 
2.0","participation":[8,3,4,4,11,12,12,14,14,1,12,13,27,8,1,8,10,27,13,7,6,11,17,10,24,36,23,11,12,23,22,27,19,16,55,25,33,35,17,34,5,6,23,11,29,13,22,54,34,3,21,14],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T16:57:34.580Z"}},{"type":"Public","name":"earth2studio","owner":"NVIDIA","isFork":false,"description":"Open-source deep-learning framework for exploring, building and deploying AI weather/climate workflows.","allTopics":["weather","ai","deep-learning","climate-science"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":40,"forksCount":8,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T16:50:56.426Z"}},{"type":"Public","name":"NVFlare","owner":"NVIDIA","isFork":false,"description":"NVIDIA Federated Learning Application Runtime Environment","allTopics":["python"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":20,"issueCount":24,"starsCount":563,"forksCount":152,"license":"Apache License 2.0","participation":[2,7,1,16,6,11,12,13,9,14,12,10,17,19,17,12,5,6,11,16,16,9,9,9,10,9,24,5,3,17,7,9,7,11,2,3,4,2,5,9,4,10,8,7,14,9,8,8,5,9,6,12],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T18:44:58.761Z"}},{"type":"Public","name":"Megatron-LM","owner":"NVIDIA","isFork":false,"description":"Ongoing research training transformer models at scale","allTopics":["transformers","model-para","large-language-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":126,"issueCount":284,"starsCount":9059,"forksCount":2044,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T19:12:54.392Z"}},{"type":"Public","name":"yum-packaging-nvidia-plugin","owner":"NVIDIA","isFork":false,"description":"NVIDIA yum and dnf plugins for RHEL driver packages","allTopics":["yum-packaging","nvidia-driver-packages","precompiled-kernel-modules"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":11,"forksCount":5,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T07:18:26.544Z"}},{"type":"Public","name":"NeMo-Aligner","owner":"NVIDIA","isFork":false,"description":"Scalable toolkit for efficient model alignment","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":23,"issueCount":40,"starsCount":300,"forksCount":33,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T19:16:16.080Z"}},{"type":"Public","name":"modulus-sym","owner":"NVIDIA","isFork":false,"description":"Framework providing pythonic APIs, algorithms and utilities to be used with Modulus core to physics inform model training as well as higher level abstraction for domain experts","allTopics":["machine-learning","deep-learning","physics","pytorch","nvidia-gpu"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":48,"starsCount":128,"forksCount":54,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T17:30:04.309Z"}},{"type":"Public","name":"cloudai","owner":"NVIDIA","isFork":false,"description":"CloudAI Benchmark 
Framework","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":0,"starsCount":16,"forksCount":9,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T04:50:26.036Z"}},{"type":"Public","name":"NeMo-Guardrails","owner":"NVIDIA","isFork":false,"description":"NeMo Guardrails is an open-source toolkit for easily adding programmable guardrails to LLM-based conversational systems.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":29,"issueCount":162,"starsCount":3624,"forksCount":314,"license":"Other","participation":[36,46,18,20,27,21,28,15,18,15,21,84,37,29,34,55,71,58,46,51,58,68,53,42,39,47,36,1,11,23,43,27,47,38,65,45,65,64,42,37,94,23,0,18,30,5,12,8,6,14,31,15],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-12T14:20:13.012Z"}},{"type":"Public","name":"nv-cloud-function-helpers","owner":"NVIDIA","isFork":false,"description":"Functions that simplify common tasks with NVIDIA Cloud Functions","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":2,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,5,9,5,0,1,0,0,0,0,1,0,0,0,0,6,2,1,2,0,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-11T13:23:11.321Z"}},{"type":"Public","name":"Deep-Learning-Accelerator-SW","owner":"NVIDIA","isFork":false,"description":"NVIDIA DLA-SW, the recipes and tools for running deep learning workloads on NVIDIA DLA cores for inference applications.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":142,"forksCount":11,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-10T21:39:57.456Z"}},{"type":"Public","name":"NeMo-text-processing","owner":"NVIDIA","isFork":false,"description":"NeMo text processing for ASR and TTS","allTopics":["text-normalization","inverse-text-n"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":4,"starsCount":232,"forksCount":74,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-10T19:02:01.004Z"}},{"type":"Public","name":"apex","owner":"NVIDIA","isFork":false,"description":"A PyTorch Extension: Tools for easy mixed precision and distributed training in Pytorch","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":71,"issueCount":634,"starsCount":8133,"forksCount":1345,"license":"BSD 3-Clause \"New\" or \"Revised\" License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-08T01:43:27.737Z"}},{"type":"Public","name":"ChatRTX","owner":"NVIDIA","isFork":false,"description":"A developer reference project for creating Retrieval Augmented Generation (RAG) chatbots on Windows using TensorRT-LLM","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":20,"starsCount":2477,"forksCount":273,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,9,3,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-07T21:29:41.305Z"}},{"type":"Public","name":"Megatron-Energon","owner":"NVIDIA","isFork":false,"description":"Megatron's multi-modal data 
loader","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":0,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-07T07:53:14.102Z"}},{"type":"Public","name":"workbench-example-hybrid-rag","owner":"NVIDIA","isFork":false,"description":"An NVIDIA AI Workbench example project for Retrieval Augmented Generation (RAG)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":3,"starsCount":49,"forksCount":133,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,7,0,0,0,0,0,0,0,2,1,1,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-06T22:19:09.092Z"}},{"type":"Public","name":"GenerativeAIExamples","owner":"NVIDIA","isFork":false,"description":"Generative AI reference workflows optimized for accelerated infrastructure and microservice architecture.","allTopics":["microservice","gpu-acceleration","nemo","tensorrt","rag","triton-inference-server","large-language-models","llm","llm-inference","retrieval-augmented-generation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":11,"issueCount":20,"starsCount":1705,"forksCount":279,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-06T22:12:54.981Z"}},{"type":"Public","name":"NeMo-speech-data-processor","owner":"NVIDIA","isFork":false,"description":"A toolkit for processing speech data and creating speech datasets","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":1,"starsCount":65,"forksCount":21,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T16:34:34.335Z"}},{"type":"Public","name":"hpc-container-maker","owner":"NVIDIA","isFork":false,"description":"HPC Container Maker","allTopics":["docker","containers","hpc","singularity"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":11,"starsCount":439,"forksCount":87,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-06T14:27:21.650Z"}},{"type":"Public","name":"numbast","owner":"NVIDIA","isFork":false,"description":"Numbast is a tool to build an automated pipeline that converts CUDA APIs into Numba bindings.","allTopics":["cuda","numba"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":18,"starsCount":13,"forksCount":3,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T15:32:34.632Z"}},{"type":"Public","name":"spark-rapids-benchmarks","owner":"NVIDIA","isFork":false,"description":"Spark RAPIDS Benchmarks – benchmark sets and utilities for the RAPIDS Accelerator for Apache Spark","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":21,"starsCount":32,"forksCount":26,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-13T16:15:34.581Z"}},{"type":"Public","name":"swift","owner":"NVIDIA","isFork":true,"description":"OpenStack Storage (Swift). 
Mirror of code maintained at opendev.org.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":1059,"license":"Apache License 2.0","participation":[4,19,10,5,3,13,1,23,8,15,4,5,13,5,14,3,5,8,3,5,6,5,6,2,5,2,2,1,3,10,3,10,4,9,7,5,8,8,15,7,4,4,1,0,4,13,4,15,7,3,3,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-04T21:08:42.172Z"}},{"type":"Public","name":"Stable-Diffusion-WebUI-TensorRT","owner":"NVIDIA","isFork":false,"description":"TensorRT Extension for Stable Diffusion Web UI","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":15,"issueCount":139,"starsCount":1819,"forksCount":138,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-02T13:49:56.975Z"}},{"type":"Public","name":"TensorRT-Model-Optimizer","owner":"NVIDIA","isFork":false,"description":"TensorRT Model Optimizer is a unified library of state-of-the-art model optimization techniques such as quantization and sparsity. It compresses deep learning models for downstream deployment frameworks like TensorRT-LLM or TensorRT to optimize inference speed on NVIDIA GPUs.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":10,"starsCount":256,"forksCount":14,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-29T07:44:32.173Z"}},{"type":"Public","name":"tao_tensorflow2_backend","owner":"NVIDIA","isFork":false,"description":"TAO Toolkit deep learning networks with TensorFlow 2.x backend","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":5,"forksCount":0,"license":"Apache License 2.0","participation":[0,0,0,0,1,3,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-29T01:23:40.767Z"}}],"repositoryCount":109,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}
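To give a flavor of the API behind the **warp** entry above, here is a minimal kernel sketch. It assumes the `warp-lang` package is installed; the `scale` kernel, the array contents, and the CPU device choice are illustrative placeholders, not code from the repository.

```python
# A minimal Warp sketch, assuming `pip install warp-lang`; kernel and array
# names are illustrative, not taken from the repository.
import numpy as np
import warp as wp

wp.init()  # initialize Warp's runtime and enumerate devices

@wp.kernel
def scale(values: wp.array(dtype=float), factor: float):
    tid = wp.tid()                      # index of this parallel thread
    values[tid] = values[tid] * factor  # one array element per thread

n = 1024
values = wp.array(np.ones(n, dtype=np.float32), device="cpu")  # "cuda:0" on a GPU
wp.launch(scale, dim=n, inputs=[values, 2.0])  # launch n threads
print(values.numpy()[:4])  # -> [2. 2. 2. 2.]
```

The same kernel runs unchanged on a CUDA device by switching the `device` argument, which is the portability the repository description refers to.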
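The **TransformerEngine** entry mentions FP8 execution on Hopper and Ada GPUs. The sketch below follows the library's documented `fp8_autocast` pattern; the layer sizes and recipe settings are placeholder choices, and actually running it requires an FP8-capable GPU.

```python
# Transformer Engine FP8 sketch; needs an FP8-capable GPU (Hopper/Ada) and
# the transformer-engine package. Sizes and recipe settings are placeholders.
import torch
import transformer_engine.pytorch as te
from transformer_engine.common import recipe

# DelayedScaling is TE's standard FP8 scaling recipe; HYBRID uses E4M3 for
# the forward pass and E5M2 for gradients.
fp8_recipe = recipe.DelayedScaling(margin=0, fp8_format=recipe.Format.HYBRID)

layer = te.Linear(768, 3072, bias=True).cuda()  # drop-in for torch.nn.Linear
x = torch.randn(32, 768, device="cuda")

with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
    y = layer(x)  # the matmul executes in FP8; activations return in higher precision
```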
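For **NeMo-Guardrails**, the sketch below uses the toolkit's top-level `RailsConfig`/`LLMRails` API to load a rails configuration and generate a guarded response. The `./config` directory is a hypothetical path; it would need to contain the usual `config.yml` (LLM settings) and Colang files defining the rails.

```python
# NeMo Guardrails sketch; "./config" is a hypothetical directory that must
# contain config.yml (LLM settings) plus Colang files defining the rails.
from nemoguardrails import LLMRails, RailsConfig

config = RailsConfig.from_path("./config")  # load rails + model configuration
rails = LLMRails(config)                    # wrap the configured LLM with rails

response = rails.generate(messages=[
    {"role": "user", "content": "Hello! What can you do?"}
])
print(response["content"])  # the guarded assistant reply
```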