{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"smiles-featurizers","owner":"MoleculeTransformers","isFork":false,"description":"Extract Molecular SMILES embeddings from language models pre-trained with various objectives architectures.","allTopics":["pytorch","embeddings","fingerprints","bart","smiles","bert","virtual-screening","sentence-embeddings","dense-vectors","moleculenet","virtual-screening-studies","sentence-transformers","simcse","bart-encoder","bart-decoder"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":14,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-09T08:50:00.685Z"}},{"type":"Public","name":"moleculetransformers.github.io","owner":"MoleculeTransformers","isFork":false,"description":"Documentation for the Molecule Transformers.","allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-07-23T10:47:31.475Z"}},{"type":"Public","name":"moleculenet-bert-ssl","owner":"MoleculeTransformers","isFork":false,"description":"Semi-supervised learning techniques (pseudo-label, mixmatch, and co-training) for pre-trained BERT language model amidst low-data regime based on molecular SMILES from the Molecule Net benchmark.","allTopics":["ssl","transformers","pytorch","smiles","co-training","moleculenet","pseudo-label","mixmatch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-17T15:25:52.194Z"}},{"type":"Public","name":"rdkit-benchmarking-platform-transformers","owner":"MoleculeTransformers","isFork":false,"description":"Port of RDKit Benchmarking platform for pre-trained transformers-based language models for virtual screening drug discovery task.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-12-29T17:13:00.577Z"}},{"type":"Public","name":"smiles-augment","owner":"MoleculeTransformers","isFork":false,"description":"Augment molecular SMILES with methods including enumeration, and mixup, for low-data regime settings for downstream supervised drug discovery tasks.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-12-28T12:26:02.399Z"}},{"type":"Public","name":"moleculenet-smiles-bert-mixup","owner":"MoleculeTransformers","isFork":false,"description":"Training pre-trained BERT language model on molecular SMILES from the Molecule Net benchmark by leveraging mixup and enumeration augmentations.","allTopics":["transformers","pytorch","enumeration","bert","mixup","moleculenet","smiles-strings"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":3,"forksCount":1,"license":"Apache License 
2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-12-23T13:47:27.626Z"}}],"repositoryCount":6,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}