tex: Introduction, data extraction cleanups
[gostyle.git] / tex / gostyle.bib
blob8d8261439e55c33757c6ac2c6555329d4080b43d
1 % This file was created with JabRef 2.6b2.
2 % Encoding: UTF-8
@MISC{GoR,
  author    = {Cieply, Ales and others},
  title     = {{EGF} Ratings System -- System Description},
  owner     = {pasky},
  timestamp = {2011.03.10},
  url       = {http://www.europeangodatabase.eu/EGD/EGF_rating_system.php}
}
@ARTICLE{CoverHart1967,
  author    = {Cover, Thomas M. and Hart, Peter E.},
  title     = {Nearest Neighbor Pattern Classification},
  journal   = {IEEE Transactions on Information Theory},
  year      = {1967},
  volume    = {13},
  number    = {1},
  pages     = {21--27},
  owner     = {hellboy},
  timestamp = {2010.03.10}
}
@INPROCEEDINGS{GellySilver2008,
  author    = {Gelly, Sylvain and Silver, David},
  title     = {Achieving Master Level Play in 9x9 Computer {Go}},
  booktitle = {AAAI'08: Proceedings of the 23rd National Conference on Artificial
               Intelligence},
  year      = {2008},
  pages     = {1537--1540},
  publisher = {AAAI Press},
  abstract  = {The UCT algorithm uses Monte-Carlo simulation to estimate the value
               of states in a search tree from the current state. However, the first
               time a state is encountered, UCT has no knowledge, and is unable
               to generalise from previous experience. We describe two extensions
               that address these weaknesses. Our first algorithm, heuristic UCT,
               incorporates prior knowledge in the form of a value function. The
               value function can be learned offline, using a linear combination
               of a million binary features, with weights trained by temporal-difference
               learning. Our second algorithm, UCT-RAVE, forms a rapid online generalisation
               based on the value of moves. We applied our algorithms to the domain
               of 9 $\times$ 9 Computer Go, using the program MoGo. Using both heuristic
               UCT and RAVE, MoGo became the first program to achieve human master
               level in competitive play.},
  isbn      = {978-1-57735-368-3},
  location  = {Chicago, Illinois}
}
@BOOK{Jolliffe1986,
  author    = {Jolliffe, I. T.},
  title     = {Principal Component Analysis},
  publisher = {Springer},
  address   = {New York},
  year      = {1986},
  owner     = {hellboy}
}
@ELECTRONIC{KohonenPy,
  author       = {{lmjohns3}},
  title        = {{python-kohonen}, {A} Library of {Kohonen} Maps},
  howpublished = {Released under {MIT} License},
  url          = {http://code.google.com/p/python-kohonen/},
  owner        = {hellboy},
  timestamp    = {2010.03.10}
}
@TECHREPORT{Nissen2003,
  author      = {Nissen, S.},
  title       = {Implementation of a Fast Artificial Neural Network Library (fann)},
  institution = {Department of Computer Science, University of Copenhagen (DIKU)},
  year        = {2003},
  url         = {http://fann.sf.net},
  owner       = {hellboy},
  timestamp   = {2010.03.10}
}
@ARTICLE{Pearson,
  author  = {Rodgers, J. L. and Nicewander, W. A.},
  title   = {Thirteen Ways to Look at the Correlation Coefficient},
  journal = {The American Statistician},
  year    = {1988},
  month   = feb,
  volume  = {42},
  number  = {1},
  pages   = {59--66},
  owner   = {pasky}
}
@ELECTRONIC{Python25,
  author    = {{Python Software Foundation}},
  title     = {Python 2.5},
  month     = sep,
  year      = {2006},
  url       = {http://www.python.org/dev/peps/pep-0356/},
  owner     = {hellboy},
  timestamp = {2009.04.29}
}
@INPROCEEDINGS{Riedmiller1993,
  author    = {Riedmiller, Martin and Braun, Heinrich},
  title     = {A Direct Adaptive Method for Faster Backpropagation Learning: The
               {RPROP} Algorithm},
  booktitle = {IEEE International Conference on Neural Networks},
  year      = {1993},
  pages     = {586--591},
  owner     = {hellboy},
  timestamp = {2010.03.07}
}
@ARTICLE{MDP,
  author    = {Zito, Tiziano and Wilbert, Niko and Wiskott, Laurenz and Berkes, Pietro},
  title     = {Modular Toolkit for Data Processing ({MDP}): a {Python} Data
               Processing Framework},
  journal   = {Frontiers in Neuroinformatics},
  year      = {2008},
  volume    = {2},
  abstract  = {Modular toolkit for Data Processing (MDP) is a data processing framework
               written in Python. From the user's perspective, MDP is a collection
               of supervised and unsupervised learning algorithms and other data
               processing units that can be combined into data processing sequences
               and more complex feed-forward network architectures. Computations
               are performed efficiently in terms of speed and memory requirements.
               From the scientific developer's perspective, MDP is a modular
               framework, which can easily be expanded. The implementation of new
               algorithms is easy and intuitive. The new implemented units are then
               automatically integrated with the rest of the library. MDP has been
               written in the context of theoretical research in neuroscience, but
               it has been designed to be helpful in any context where trainable
               data processing algorithms are used. Its simplicity on the user's
               side, the variety of readily available algorithms, and the reusability
               of the implemented units make it also a useful educational tool.},
  doi       = {10.3389/neuro.11.008.2008},
  issn      = {1662-5196},
  owner     = {hellboy},
  timestamp = {2010.03.05}
}
138 @comment{jabref-meta: selector_publisher:}
140 @comment{jabref-meta: selector_author:}
142 @comment{jabref-meta: selector_journal:}
144 @comment{jabref-meta: selector_keywords:}