@inproceedings{dd51dbe310af4feeba7477355bc8929e,
  title     = {Learning from {Monte Carlo} Rollouts with Opponent Models for Playing {Tron}},
  abstract  = {This paper describes a novel reinforcement learning system for learning to play the game of Tron. The system combines Q-learning, multi-layer perceptrons, vision grids, opponent modelling, and Monte Carlo rollouts in a novel way. By learning an opponent model, Monte Carlo rollouts can be effectively applied to generate state trajectories for all possible actions from which improved action estimates can be computed. This allows to extend experience replay by making it possible to update the state-action values of all actions in a given game state simultaneously. The results show that the use of experience replay that updates the Q-values of all actions simultaneously strongly outperforms the conventional experience replay that only updates the Q-value of the performed action. The results also show that using short or long rollout horizons during training lead to similar good performances against two fixed opponents.},
  keywords  = {Reinforcement Learning, Neural Networks, Computer Games},
  author    = {Knegt, Stefan and Drugan, {Madalina M.} and Wiering, Marco},
  year      = {2018},
  month     = dec,
  day       = {30},
  doi       = {10.1007/978-3-030-05453-3_6},
  language  = {English},
  isbn      = {978-3-030-05452-6},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer},
  pages     = {105--129},
  editor    = {van den Herik, Jaap and Rocha, Ana Paula},
  booktitle = {Agents and Artificial Intelligence -- {ICAART} 2018, Revised Selected Papers},
  note      = {ICAART 2018 : International Conference on Agents and Artificial Intelligence ; Conference date: 16-01-2018 Through 18-01-2018},
  internal-note = {Entry type changed from @inbook (Pure export artifact) to @inproceedings; booktitle expanded from "ICAART 2018" to the Springer LNCS volume title matching DOI 10.1007/978-3-030-05453-3 -- verify against the published volume},
}