Update paper.bib
sgbaird committed Jul 29, 2022
1 parent 4c8aafd commit 0b34988
Showing 1 changed file with 11 additions and 29 deletions.
reports/paper.bib: 40 changes (11 additions & 29 deletions)
@@ -4,7 +4,7 @@ @article{gomez-bombarelliAutomaticChemicalDesign2016
author = {{G{\'o}mez-Bombarelli}, Rafael and Wei, Jennifer N. and Duvenaud, David and {Hern{\'a}ndez-Lobato}, Jos{\'e} Miguel and {S{\'a}nchez-Lengeling}, Benjam{\'i}n and Sheberla, Dennis and {Aguilera-Iparraguirre}, Jorge and Hirzel, Timothy D. and Adams, Ryan P. and {Aspuru-Guzik}, Al{\'a}n},
year = {2016},
publisher = {{arXiv}},
doi = {10.48550/ARXIV.1610.02415v1},
doi = {10.48550/ARXIV.1610.02415},
copyright = {arXiv.org perpetual, non-exclusive license},
keywords = {Chemical Physics (physics.chem-ph),FOS: Computer and information sciences,FOS: Physical sciences,Machine Learning (cs.LG)}
}
@@ -62,22 +62,6 @@ @article{goodallPredictingMaterialsProperties2020
file = {C\:\\Users\\sterg\\Zotero\\storage\\IF6WR9YR\\Goodall and Lee - 2020 - Predicting materials properties without crystal st.pdf}
}

@misc{kingmaAutoEncodingVariationalBayes2014,
title = {Auto-{{Encoding Variational Bayes}}},
author = {Kingma, Diederik P. and Welling, Max},
year = {2014},
month = may,
number = {arXiv:1312.6114},
eprint = {1312.6114},
eprinttype = {arxiv},
primaryclass = {cs, stat},
publisher = {{arXiv}},
abstract = {How can we perform efficient inference and learning in directed probabilistic models, in the presence of continuous latent variables with intractable posterior distributions, and large datasets? We introduce a stochastic variational inference and learning algorithm that scales to large datasets and, under some mild differentiability conditions, even works in the intractable case. Our contributions is two-fold. First, we show that a reparameterization of the variational lower bound yields a lower bound estimator that can be straightforwardly optimized using standard stochastic gradient methods. Second, we show that for i.i.d. datasets with continuous latent variables per datapoint, posterior inference can be made especially efficient by fitting an approximate inference model (also called a recognition model) to the intractable posterior using the proposed lower bound estimator. Theoretical advantages are reflected in experimental results.},
archiveprefix = {arXiv},
langid = {english},
keywords = {Computer Science - Machine Learning,Statistics - Machine Learning}
}

@misc{kingmaAutoEncodingVariationalBayes2014a,
title = {Auto-{{Encoding Variational Bayes}}},
author = {Kingma, Diederik P. and Welling, Max},
@@ -159,22 +143,20 @@ @misc{riebesellPymatviz2022
keywords = {data-visualization,machine-learning,materials-informatics,matplotlib,plotly,plots,uncertainty,uncertainty-calibration}
}

@misc{sahariaPaletteImagetoImageDiffusion2022,
@inproceedings{sahariaPaletteImagetoImageDiffusion2022a,
title = {Palette: {{Image-to-Image Diffusion Models}}},
shorttitle = {Palette},
author = {Saharia, Chitwan and Chan, William and Chang, Huiwen and Lee, Chris A. and Ho, Jonathan and Salimans, Tim and Fleet, David J. and Norouzi, Mohammad},
booktitle = {Special {{Interest Group}} on {{Computer Graphics}} and {{Interactive Techniques Conference Proceedings}}},
author = {Saharia, Chitwan and Chan, William and Chang, Huiwen and Lee, Chris and Ho, Jonathan and Salimans, Tim and Fleet, David and Norouzi, Mohammad},
year = {2022},
month = may,
number = {arXiv:2111.05826},
eprint = {2111.05826},
eprinttype = {arxiv},
primaryclass = {cs},
publisher = {{arXiv}},
abstract = {This paper develops a unified framework for image-to-image translation based on conditional diffusion models and evaluates this framework on four challenging image-to-image translation tasks, namely colorization, inpainting, uncropping, and JPEG restoration. Our simple implementation of image-to-image diffusion models outperforms strong GAN and regression baselines on all tasks, without task-specific hyper-parameter tuning, architecture customization, or any auxiliary loss or sophisticated new techniques needed. We uncover the impact of an L2 vs. L1 loss in the denoising diffusion objective on sample diversity, and demonstrate the importance of self-attention in the neural architecture through empirical studies. Importantly, we advocate a unified evaluation protocol based on ImageNet, with human evaluation and sample quality scores (FID, Inception Score, Classification Accuracy of a pre-trained ResNet-50, and Perceptual Distance against original images). We expect this standardized evaluation protocol to play a role in advancing image-to-image translation research. Finally, we show that a generalist, multi-task diffusion model performs as well or better than task-specific specialist counterparts. Check out https://diffusion-palette.github.io for an overview of the results.},
archiveprefix = {arXiv},
month = aug,
pages = {1--10},
publisher = {{ACM}},
address = {{Vancouver BC Canada}},
doi = {10.1145/3528233.3530757},
isbn = {978-1-4503-9337-9},
langid = {english},
keywords = {Computer Science - Computer Vision and Pattern Recognition,Computer Science - Machine Learning},
file = {C\:\\Users\\sterg\\Zotero\\storage\\ICZ3654S\\Saharia et al. - 2022 - Palette Image-to-Image Diffusion Models.pdf}
file = {C\:\\Users\\sterg\\Zotero\\storage\\KXPL42QN\\Saharia et al. - 2022 - Palette Image-to-Image Diffusion Models.pdf}
}

@misc{selfies,