@comment{
  Non-bibliographic residue from a GitHub file-view export, preserved for reference:
  Permalink
  Switch branches/tags
  Nothing to show
  Find file Copy path
  Fetching contributors…
  Cannot retrieve contributors at this time
  202 lines (192 sloc) 10.7 KB
}
% Depth-limited solving for imperfect-information games (NIPS 2018).
@inproceedings{brown2018depth,
  author    = {Brown, Noam and Sandholm, Tuomas and Amos, Brandon},
  title     = {Depth-Limited Solving for Imperfect-Information Games},
  booktitle = {NIPS},
  year      = {2018},
  url       = {http://arxiv.org/abs/1805.08195},
  _venue    = {NIPS},
  abstract  = {
A fundamental challenge in imperfect-information games is that states do not have well-defined values. As a result, depth-limited search algorithms used in single-agent settings and perfect-information games do not apply. This paper introduces a principled way to conduct depth-limited solving in imperfect-information games by allowing the opponent to choose among a number of strategies for the remainder of the game at the depth limit. Each one of these strategies results in a different set of values for leaf nodes. This forces an agent to be robust to the different strategies an opponent may employ. We demonstrate the effectiveness of this approach by building a master-level heads-up no-limit Texas hold'em poker AI that defeats two prior top agents using only a 4-core CPU and 16 GB of memory. Developing such a powerful agent would have previously required a supercomputer.
  }
}
% NOTE(review): the title was fully double-braced, which defeats style-applied
% sentence casing; brace only the {MPC} acronym, which must keep its case.
@inproceedings{amos2018end,
title={Differentiable {MPC} for End-to-end Planning and Control},
author={Amos, Brandon and Sacks, Jacob and Rodriguez, Ivan Dario Jimenez and Boots, Byron and Kolter, J Zico},
year={2018},
booktitle={NIPS},
_venue={NIPS},
abstract={
In this paper we present foundations for using model predictive control (MPC) as a differentiable policy class in reinforcement learning. Specifically, we differentiate through MPC by using the KKT conditions of the convex approximation at a fixed point of the solver. Using this strategy, we are able to learn the cost and dynamics of a controller via end-to-end learning in a larger system. We empirically show results in an imitation learning setting, demonstrating that we can recover the underlying dynamics and cost more efficiently and reliably than with a generic neural network policy class
}
}
% NOTE(review): authors converted to the unambiguous "Last, First" form. The
% previous "First Last" form mis-splits compound surnames: BibTeX parsed
% "Sergio G{\'o}mez Colmenarejo" as Last=Colmenarejo, and "Nando de Freitas"
% relied on case detection of the "de" particle. Comma form fixes both.
@inproceedings{amos2018learning,
title={Learning Awareness Models},
author={Amos, Brandon and Dinh, Laurent and Cabi, Serkan and Roth{\"o}rl, Thomas and G{\'o}mez Colmenarejo, Sergio and Muldal, Alistair and Erez, Tom and Tassa, Yuval and de Freitas, Nando and Denil, Misha},
booktitle={International Conference on Learning Representations},
year={2018},
url={https://openreview.net/forum?id=r1HhRfWRZ},
_venue={ICLR},
abstract={
We consider the setting of an agent with a fixed body interacting with an
unknown and uncertain external world. We show that models
trained to predict proprioceptive information about the
agent's body come to represent objects in the external world.
In spite of being trained with only internally available
signals, these dynamic body models come to represent external
objects through the necessity of predicting their effects on
the agent's own body. That is, the model learns holistic
persistent representations of objects in the world, even
though the only training signals are body signals. Our
dynamics model is able to successfully predict distributions
over 132 sensor readings over 100 steps into the future and we
demonstrate that even when the body is no longer in contact
with an object, the latent variables of the dynamics model
continue to represent its shape. We show that active data
collection by maximizing the entropy of predictions about the
body---touch sensors, proprioception and vestibular
information---leads to learning of dynamic models that show
superior performance when used for control. We also collect
data from a real robotic hand and show that the same models
can be used to answer questions about properties of objects in
the real world. Videos with qualitative results of our models
are available <a href="https://goo.gl/mZuqAV">here</a>.
}
}
% NOTE(review): braced {IoT} and {ACM} so sentence-casing styles cannot
% downcase the acronyms (e.g. "iot").
@inproceedings{wang2017scalable,
title={A Scalable and Privacy-Aware {IoT} Service for Live Video Analytics},
author={Wang, Junjue and Amos, Brandon and Das, Anupam and Pillai, Padmanabhan and Sadeh, Norman and Satyanarayanan, Mahadev},
booktitle={Proceedings of the 8th {ACM} on Multimedia Systems Conference},
pages={38--49},
year={2017},
organization={ACM},
_venue={ACM MMSys},
_note={Best Paper Award},
}
% Task-based end-to-end model learning (NIPS 2017).
@inproceedings{donti2017task,
  author    = {Donti, Priya L and Amos, Brandon and Kolter, J Zico},
  title     = {Task-based End-to-end Model Learning},
  booktitle = {NIPS},
  year      = {2017},
  url       = {http://arxiv.org/abs/1703.04529},
  codeurl   = {https://github.com/locuslab/e2e-model-learning},
  _venue    = {NIPS},
  abstract  = {
As machine learning techniques have become more ubiquitous, it has
become common to see machine learning prediction algorithms operating
within some larger process. However, the criteria by which we train
machine learning algorithms often differ from the ultimate criteria on
which we evaluate them. This paper proposes an end-to-end approach for
learning probabilistic machine learning models within the context of
stochastic programming, in a manner that directly captures the
ultimate task-based objective for which they will be used. We then
present two experimental evaluations of the proposed approach, one as
applied to a generic inventory stock problem and the second to a
real-world electrical grid scheduling task. In both cases, we show
that the proposed approach can outperform both a traditional modeling
approach and a purely black-box policy optimization approach.
  }
}
% NOTE(review): the title was quote-delimited and fully double-braced (a common
% auto-export anti-pattern that defeats style casing). Switched to brace
% delimiters and protected only the {OptNet} name; authors converted to the
% unambiguous "Last, First" form used elsewhere in this file.
@inproceedings{amos2017optnet,
title={{OptNet}: Differentiable Optimization as a Layer in Neural Networks},
author={Amos, Brandon and Kolter, J. Zico},
booktitle={ICML},
_venue={ICML},
codeurl={https://github.com/locuslab/optnet},
year={2017},
url={http://arxiv.org/abs/1703.00443},
abstract={
This paper presents OptNet, a network architecture that integrates
optimization problems (here, specifically in the form of quadratic programs)
as individual layers in larger end-to-end trainable deep networks.
These layers encode constraints and complex dependencies
between the hidden states that traditional convolutional and
fully-connected layers often cannot capture.
In this paper, we explore the foundations for such an architecture:
we show how techniques from sensitivity analysis, bilevel
optimization, and implicit differentiation can be used to
exactly differentiate through these layers and with respect
to layer parameters;
we develop a highly efficient solver for these layers that exploits fast
GPU-based batch solves within a primal-dual interior point method, and which
provides backpropagation gradients with virtually no additional cost on top of
the solve;
and we highlight the application of these approaches in several problems.
In one notable example, we show that the method is
capable of learning to play mini-Sudoku (4x4) given just input and output games,
with no a priori information about the rules of the game;
this highlights the ability of our architecture to learn hard
constraints better than other neural architectures.
}
}
% NOTE(review): authors converted to the unambiguous "Last, First" form for
% consistency with the comma-form entries elsewhere in this file.
@inproceedings{amos2017input,
title={Input Convex Neural Networks},
author={Amos, Brandon and Xu, Lei and Kolter, J. Zico},
booktitle={ICML},
_venue={ICML},
codeurl={https://github.com/locuslab/icnn},
year={2017},
url={http://arxiv.org/abs/1609.07152},
abstract={
This paper presents the input convex neural network
architecture. These are scalar-valued (potentially deep) neural
networks with constraints on the network parameters such that the
output of the network is a convex function of (some of) the inputs.
The networks allow for efficient inference via optimization over some
inputs to the network given others, and can be applied to settings
including structured prediction, data imputation, reinforcement
learning, and others. In this paper we lay the basic groundwork for
these models, proposing methods for inference, optimization and
learning, and analyze their representational power. We show that many
existing neural network architectures can be made input-convex with
a minor modification, and develop specialized optimization
algorithms tailored to this setting. Finally, we highlight the
performance of the methods on multi-label prediction, image
completion, and reinforcement learning problems, where we show
improvement over the existing state of the art in many cases.
}
}
% NOTE(review): the title was fully double-braced, which defeats style-applied
% sentence casing; protect only the model name {Sum-Product Networks}. Authors
% converted to the unambiguous "Last, First" form used elsewhere in this file.
@inproceedings{zhao2016collapsed,
title={Collapsed Variational Inference for {Sum-Product Networks}},
author={Zhao, Han and Adel, Tameem and Gordon, Geoff and Amos, Brandon},
booktitle={ICML},
_venue={ICML},
year={2016},
url={http://www.cs.cmu.edu/~hzhao1/papers/ICML2016/BL-SPN-main.pdf},
abstract={
Sum-Product Networks (SPNs) are probabilistic inference machines that admit
exact inference in linear time in the size of the network. Existing
parameter learning approaches for SPNs are largely based on the maximum
likelihood principle and hence are subject to overfitting compared to
more Bayesian approaches. Exact Bayesian posterior inference for SPNs is
computationally intractable. Both standard variational inference and
posterior sampling for SPNs are computationally infeasible even for
networks of moderate size due to the large number of local latent
variables per instance. In this work, we propose a novel deterministic
collapsed variational inference algorithm for SPNs that is
computationally efficient, easy to implement and at the same time allows
us to incorporate prior information into the optimization formulation.
Extensive experiments show a significant improvement in accuracy compared
with a maximum likelihood based approach.
}
}
% NOTE(review): the title was fully double-braced and stored in lowercase (the
% least-informative form — styles can downcase but cannot restore capitals).
% Re-entered in Title Case with only the {Android} proper noun protected.
@inproceedings{amos2013applying,
title={Applying Machine Learning Classifiers to Dynamic {Android}
Malware Detection at Scale},
author={Amos, Brandon and Turner, Hamilton and White, Jules},
booktitle={IWCMC Security, Trust and Privacy Symposium},
_venue={IWCMC},
year={2013},
codeurl={https://github.com/VT-Magnum-Research/antimalware},
url={http://bamos.github.io/data/papers/amos-iwcmc2013.pdf},
abstract={
The widespread adoption and contextually sensitive
nature of smartphone devices has increased concerns over smartphone
malware. Machine learning classifiers are a current method
for detecting malicious applications on smartphone systems. This
paper presents the evaluation of a number of existing classifiers,
using a dataset containing thousands of real (i.e. not synthetic)
applications. We also present our STREAM framework, which
was developed to enable rapid large-scale validation of mobile
malware machine learning classifiers.
}
}