This paper investigates a novel model-free reinforcement learning architecture, the Natural Actor-Critic. The actor updates are based on stochastic policy gradients employing Amari's natural gradient approach, while the critic obtains both the natural policy gradient and additional parameters of a value function simultaneously by linear regression. We show that actor improvements with natural policy gradients are particularly appealing as these are independent of the coordinate frame of the chosen policy representation, and can be estimated more efficiently than regular policy gradients. The critic makes use of a special basis function parameterization motivated by the policy-gradient compatible function approximation. We show that several well-known reinforcement learning methods such as the original Actor-Critic and Bradtke's Linear Quadratic Q-Learning are in fact Natural Actor-Critic algorithms. Empirical evaluations illustrate the effectiveness of our techniques in comparison to previous methods, and also demonstrate their applicability for learning control on an anthropomorphic robot arm.
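
For orientation, the relations the abstract alludes to can be sketched as follows. This is the standard compatible-function-approximation / natural-gradient reasoning as it is usually presented, not text quoted from this page; the symbols (J for the expected return, F for the Fisher information matrix, w for the critic weights, alpha for the step size) are conventional.

\begin{align}
f_w(s, a) &= \nabla_\theta \log \pi_\theta(a \mid s)^\top w \;\approx\; A^{\pi}(s, a)
  && \text{(compatible critic, fit by linear regression)} \\
F(\theta) &= \mathbb{E}_{\pi_\theta}\!\left[ \nabla_\theta \log \pi_\theta(a \mid s)\, \nabla_\theta \log \pi_\theta(a \mid s)^\top \right]
  && \text{(Fisher information of the policy)} \\
\widetilde{\nabla}_\theta J(\theta) &= F(\theta)^{-1} \nabla_\theta J(\theta) = w
  && \text{(natural policy gradient equals the critic weights)} \\
\theta_{k+1} &= \theta_k + \alpha\, w
  && \text{(actor update)}
\end{align}

Because the natural gradient coincides with the critic's regression weights w, the actor needs no explicit inversion of F(theta); the invariance to the coordinate frame of the policy representation is the usual property of following the gradient under the Fisher metric.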

Author(s): Peters, J. and Vijayakumar, S. and Schaal, S.
Book Title: Proceedings of the 16th European Conference on Machine Learning
Volume: 3720
Pages: 280-291
Year: 2005
Editors: Gama, J.; Camacho, R.; Brazdil, P.; Jorge, A.; Torgo, L.
Publisher: Springer
Bibtex Type: Conference Paper (inproceedings)
DOI: 10.1007/11564096_29
Event Name: ECML 2005
Event Place: Porto, Portugal
URL: http://www-clmc.usc.edu/publications/P/peters-ECML2005.pdf
Cross Ref: p2574
Electronic Archiving: grant_archive
Note: clmc

BibTeX

@inproceedings{Peters_PECML_2005,
  title = {Natural Actor-Critic},
  booktitle = {Proceedings of the 16th European Conference on Machine Learning},
  abstract = {This paper investigates a novel model-free reinforcement learning architecture, the Natural Actor-Critic. The actor updates are based on stochastic policy gradients employing Amari's natural gradient approach, while the critic obtains both the natural policy gradient and additional parameters of a value function simultaneously by linear regression. We show that actor improvements with natural policy gradients are particularly appealing as these are independent of the coordinate frame of the chosen policy representation, and can be estimated more efficiently than regular policy gradients. The critic makes use of a special basis function parameterization motivated by the policy-gradient compatible function approximation. We show that several well-known reinforcement learning methods such as the original Actor-Critic and Bradtke's Linear Quadratic Q-Learning are in fact Natural Actor-Critic algorithms. Empirical evaluations illustrate the effectiveness of our techniques in comparison to previous methods, and also demonstrate their applicability for learning control on an anthropomorphic robot arm.},
  volume = {3720},
  pages = {280-291},
  editor = {Gama, J. and Camacho, R. and Brazdil, P. and Jorge, A. and Torgo, L.},
  publisher = {Springer},
  year = {2005},
  note = {clmc},
  slug = {peters_pecml_2005},
  author = {Peters, J. and Vijayakumar, S. and Schaal, S.},
  crossref = {p2574},
  url = {http://www-clmc.usc.edu/publications/P/peters-ECML2005.pdf}
}