% tex/gostyle.tex
% [gostyle.git] / tex / gostyle.tex
% blob 4d61414b613de7c0c087cba7a73ea1de062e87d3
1 \documentclass[journal]{IEEEtran}
3 \usepackage{cite}
4 % cite.sty was written by Donald Arseneau
5 % V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
6 % \cite{} output to follow that of IEEE. Loading the cite package will
7 % result in citation numbers being automatically sorted and properly
8 % "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
9 % cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
10 % \cite will automatically add leading space, if needed. Use cite.sty's
11 % noadjust option (cite.sty V3.8 and later) if you want to turn this off.
12 % cite.sty is already installed on most LaTeX systems. Be sure and use
13 % version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
14 % not currently provide for hyperlinked citations.
15 % The latest version can be obtained at:
16 % http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
17 % The documentation is contained in the cite.sty file itself.
20 % *** GRAPHICS RELATED PACKAGES ***
22 \ifCLASSINFOpdf
23 % \usepackage[pdftex]{graphicx}
24 % declare the path(s) where your graphic files are
25 % \graphicspath{{../pdf/}{../jpeg/}}
26 % and their extensions so you won't have to specify these with
27 % every instance of \includegraphics
28 % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
29 \else
30 % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
31 % will default to the driver specified in the system graphics.cfg if no
32 % driver is specified.
33 % \usepackage[dvips]{graphicx}
34 % declare the path(s) where your graphic files are
35 % \graphicspath{{../eps/}}
36 % and their extensions so you won't have to specify these with
37 % every instance of \includegraphics
38 % \DeclareGraphicsExtensions{.eps}
39 \fi
41 \usepackage{algorithm}
42 \usepackage{algorithmic}
43 %\usepackage{algpseudocode}
44 % WICKED: nefunguje ani jedno???
45 % algorithmic.sty can be obtained at:
46 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
47 % There is also a support site at:
48 % http://algorithms.berlios.de/index.html
49 % Also of interest may be the (relatively newer and more customizable)
50 % algorithmicx.sty package by Szasz Janos:
51 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/
53 % *** ALIGNMENT PACKAGES ***
55 %\usepackage{array}
56 % http://www.ctan.org/tex-archive/macros/latex/required/tools/
59 \usepackage{amsmath}
60 %\usepackage{mdwtab}
61 % http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/
64 % IEEEtran contains the IEEEeqnarray family of commands that can be used to
65 % generate multiline equations as well as matrices, tables, etc., of high
66 % quality.
68 %\usepackage{eqparbox}
69 % Also of notable interest is Scott Pakin's eqparbox package for creating
70 % (automatically sized) equal width boxes - aka "natural width parboxes".
71 % Available at:
72 % http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/
76 % *** SUBFIGURE PACKAGES ***
77 %\usepackage[tight,footnotesize]{subfigure}
78 % subfigure.sty was written by Steven Douglas Cochran. This package makes it
79 % easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
80 % work, it is a good idea to load it with the tight package option to reduce
81 % the amount of white space around the subfigures. subfigure.sty is already
82 % installed on most LaTeX systems. The latest version and documentation can
83 % be obtained at:
84 % http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
85 % subfigure.sty has been superceeded by subfig.sty.
89 %\usepackage[caption=false]{caption}
90 %\usepackage[font=footnotesize]{subfig}
91 % subfig.sty, also written by Steven Douglas Cochran, is the modern
92 % replacement for subfigure.sty. However, subfig.sty requires and
93 % automatically loads Axel Sommerfeldt's caption.sty which will override
94 % IEEEtran.cls handling of captions and this will result in nonIEEE style
95 % figure/table captions. To prevent this problem, be sure and preload
96 % caption.sty with its "caption=false" package option. This is will preserve
97 % IEEEtran.cls handing of captions. Version 1.3 (2005/06/28) and later
98 % (recommended due to many improvements over 1.2) of subfig.sty supports
99 % the caption=false option directly:
100 %\usepackage[caption=false,font=footnotesize]{subfig}
102 % The latest version and documentation can be obtained at:
103 % http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
104 % The latest version and documentation of caption.sty can be obtained at:
105 % http://www.ctan.org/tex-archive/macros/latex/contrib/caption/
109 % *** FLOAT PACKAGES ***
111 %\usepackage{fixltx2e}
112 % fixltx2e, the successor to the earlier fix2col.sty, was written by
113 % Frank Mittelbach and David Carlisle. This package corrects a few problems
114 % in the LaTeX2e kernel, the most notable of which is that in current
115 % LaTeX2e releases, the ordering of single and double column floats is not
116 % guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
117 % single column figure to be placed prior to an earlier double column
118 % figure. The latest version and documentation can be found at:
119 % http://www.ctan.org/tex-archive/macros/latex/base/
123 %\usepackage{stfloats}
124 % stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
125 % the ability to do double column floats at the bottom of the page as well
126 % as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
127 % LaTeX2e). It also provides a command:
128 %\fnbelowfloat
129 % to enable the placement of footnotes below bottom floats (the standard
130 % LaTeX2e kernel puts them above bottom floats). This is an invasive package
131 % which rewrites many portions of the LaTeX2e float routines. It may not work
132 % with other packages that modify the LaTeX2e float routines. The latest
133 % version and documentation can be obtained at:
134 % http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
135 % Documentation is contained in the stfloats.sty comments as well as in the
136 % presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
137 % does not allow \baselineskip to stretch. Authors submitting work to the
138 % IEEE should note that IEEE rarely uses double column equations and
139 % that authors should try to avoid such use. Do not be tempted to use the
140 % cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
141 % not format its papers in such ways.
144 %\ifCLASSOPTIONcaptionsoff
145 % \usepackage[nomarkers]{endfloat}
146 % \let\MYoriglatexcaption\caption
147 % \renewcommand{\caption}[2][\relax]{\MYoriglatexcaption[#2]{#2}}
148 %\fi
149 % endfloat.sty was written by James Darrell McCauley and Jeff Goldberg.
150 % This package may be useful when used in conjunction with IEEEtran.cls'
151 % captionsoff option. Some IEEE journals/societies require that submissions
152 % have lists of figures/tables at the end of the paper and that
153 % figures/tables without any captions are placed on a page by themselves at
154 % the end of the document. If needed, the draftcls IEEEtran class option or
155 % \CLASSINPUTbaselinestretch interface can be used to increase the line
156 % spacing as well. Be sure and use the nomarkers option of endfloat to
157 % prevent endfloat from "marking" where the figures would have been placed
158 % in the text. The two hack lines of code above are a slight modification of
159 % that suggested by in the endfloat docs (section 8.3.1) to ensure that
160 % the full captions always appear in the list of figures/tables - even if
161 % the user used the short optional argument of \caption[]{}.
162 % IEEE papers do not typically make use of \caption[]'s optional argument,
163 % so this should not be an issue. A similar trick can be used to disable
164 % captions of packages such as subfig.sty that lack options to turn off
165 % the subcaptions:
166 % For subfig.sty:
167 % \let\MYorigsubfloat\subfloat
168 % \renewcommand{\subfloat}[2][\relax]{\MYorigsubfloat[]{#2}}
169 % For subfigure.sty:
170 % \let\MYorigsubfigure\subfigure
171 % \renewcommand{\subfigure}[2][\relax]{\MYorigsubfigure[]{#2}}
172 % However, the above trick will not work if both optional arguments of
173 % the \subfloat/subfig command are used. Furthermore, there needs to be a
174 % description of each subfigure *somewhere* and endfloat does not add
175 % subfigure captions to its list of figures. Thus, the best approach is to
176 % avoid the use of subfigure captions (many IEEE journals avoid them anyway)
177 % and instead reference/explain all the subfigures within the main caption.
178 % The latest version of endfloat.sty and its documentation can obtained at:
179 % http://www.ctan.org/tex-archive/macros/latex/contrib/endfloat/
181 % The IEEEtran \ifCLASSOPTIONcaptionsoff conditional can also be used
182 % later in the document, say, to conditionally put the References on a
183 % page by themselves.
185 % *** PDF, URL AND HYPERLINK PACKAGES ***
187 %\usepackage{url}
188 % url.sty was written by Donald Arseneau. It provides better support for
189 % handling and breaking URLs. url.sty is already installed on most LaTeX
190 % systems. The latest version can be obtained at:
191 % http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
192 % Read the url.sty source comments for usage information. Basically,
193 % \url{my_url_here}.
196 % *** Do not adjust lengths that control margins, column widths, etc. ***
197 % *** Do not use packages that alter fonts (such as pslatex). ***
198 % There should be no need to do such things with IEEEtran.cls V1.6 and later.
199 % (Unless specifically asked to do so by the journal or conference you plan
200 % to submit to, of course. )
202 % correct bad hyphenation here
203 \hyphenation{op-tical net-works semi-conduc-tor}
206 \begin{document}
208 % paper title
209 % can use linebreaks \\ within to get better formatting as desired
210 \title{On Pattern Feature Trends in Large Go Game Corpus}
212 % use \thanks{} to gain access to the first footnote area
213 % a separate \thanks must be used for each paragraph as LaTeX2e's \thanks
214 % was not built to handle multiple paragraphs
215 \author{Petr~Baudis,~Josef~Moudrik% <-this % stops a space
216 \thanks{P. Baudis is student at the Faculty of Math and Physics, Charles University, Prague, CZ, and also does some of his Computer Go research as an employee of SUSE Labs Prague, Novell CZ.}% <-this % stops a space
217 \thanks{J. Moudrik is student at the Faculty of Math and Physics, Charles University, Prague, CZ.}}
219 % note the % following the last \IEEEmembership and also \thanks -
220 % these prevent an unwanted space from occurring between the last author name
221 % and the end of the author line. i.e., if you had this:
223 % \author{....lastname \thanks{...} \thanks{...} }
224 % ^------------^------------^----Do not want these spaces!
226 % a space would be appended to the last name and could cause every name on that
227 % line to be shifted left slightly. This is one of those "LaTeX things". For
228 % instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
229 % "AB" then you have to do: "\textbf{A}\textbf{B}"
230 % \thanks is no different in this regard, so shield the last } of each \thanks
231 % that ends a line with a % and do not let a space in before the next \thanks.
232 % Spaces after \IEEEmembership other than the last one are OK (and needed) as
233 % you are supposed to have spaces between the names. For what it is worth,
234 % this is a minor point as most people would not even notice if the said evil
235 % space somehow managed to creep in.
238 % The paper headers
239 \markboth{Transactions on Computational Intelligence and AI in Games}%
240 {On Pattern Feature Trends in Large Go Game Corpus}
241 % The only time the second header will appear is for the odd numbered pages
242 % after the title page when using the twoside option.
244 % *** Note that you probably will NOT want to include the author's ***
245 % *** name in the headers of peer review papers. ***
246 % You can use \ifCLASSOPTIONpeerreview for conditional compilation here if
247 % you desire.
252 % If you want to put a publisher's ID mark on the page you can do it like
253 % this:
254 %\IEEEpubid{0000--0000/00\$00.00~\copyright~2007 IEEE}
255 % Remember, if you use this you must call \IEEEpubidadjcol in the second
256 % column for its text to clear the IEEEpubid mark.
260 % use for special paper notices
261 %\IEEEspecialpapernotice{(Invited Paper)}
266 % make the title area
267 \maketitle
270 \begin{abstract}
271 %\boldmath
273 We process a~large corpus of game records of the board game of Go and
274 propose a~way to extract per-player summary information on played moves.
275 We then apply several basic data-mining methods on the summary
276 information to identify the most differentiating features within the
277 summary information, and discuss their correspondence with traditional
278 Go knowledge. We show mappings of the features to player attributes
279 like playing strength or informally perceived ``playing style'' (such as
280 territoriality or aggressivity), and propose applications including
281 seeding real-world ranks of internet players, aiding in Go study, or
282 contributing to the discussion within Go theory on the scope of ``playing
283 style''.
285 \end{abstract}
286 % IEEEtran.cls defaults to using nonbold math in the Abstract.
287 % This preserves the distinction between vectors and scalars. However,
288 % if the journal you are submitting to favors bold math in the abstract,
289 % then you can use LaTeX's standard command \boldmath at the very start
290 % of the abstract to achieve this. Many IEEE journals frown on math
291 % in the abstract anyway.
293 % Note that keywords are not normally used for peerreview papers.
294 \begin{IEEEkeywords}
295 board games, go, data mining, player strength, playing style
296 \end{IEEEkeywords}
303 % For peer review papers, you can put extra information on the cover
304 % page as needed:
305 % \ifCLASSOPTIONpeerreview
306 % \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
307 % \fi
309 % For peerreview papers, this IEEEtran command inserts a page break and
310 % creates the second title. It will be ignored for other modes.
311 \IEEEpeerreviewmaketitle
315 \section{Introduction}
316 % The very first letter is a 2 line initial drop letter followed
317 % by the rest of the first word in caps.
319 % form to use if the first word consists of a single letter:
320 % \IEEEPARstart{A}{demo} file is ....
322 % form to use if you need the single drop letter followed by
323 % normal text (unknown if ever used by IEEE):
324 % \IEEEPARstart{A}{}demo file is ....
326 % Some journals put the first two words in caps:
327 % \IEEEPARstart{T}{his demo} file is ....
329 % Here we have the typical use of a "T" for an initial drop letter
330 % and "HIS" in caps to complete the first word.
331 \IEEEPARstart{T}{he} field of Computer Go usually focuses on the problem
332 of creating a~program to play the game, finding the best move from a~given
333 board position. We will make use of one method developed in the course
334 of such research and apply it to the analysis of existing game records
335 with the aim of helping humans to play the game better instead.
337 Go is a~two-player full-information board game played
338 on a~square grid (usually $19\times19$ lines) with black and white
339 stones; the goal of the game is to surround the most territory and
340 capture enemy stones. We assume basic familiarity with the game.
342 Many Go players are eager to play using computers (usually over
343 the internet) and review games played by others on computers as well.
344 This means that large amounts of game records are collected and digitally
345 stored, enabling easy processing of such collections. However, so far
346 little has been done with the available data --- we are aware
347 only of uses for simple win/loss statistics (TODO: KGS Stats, KGS Analytics,
348 Pro Go Rating) and ``next move'' statistics on a~specific position (TODO:
349 Kombilo, Moyo Go Studio).
351 We present a~more in-depth approach --- from all played moves, we devise
352 a~compact evaluation of each player. We then explore correlations between
353 evaluations of various players in light of externally given information.
354 This way, we can discover similarity between move characteristics of
355 players with the same playing strength, or discuss the meaning of the
356 ``playing style'' concept on the assumption that similar playing styles
357 should yield similar move characteristics.
360 \section{Expert-based knowledge}
361 \label{style-vectors}
362 In order to provide a reference frame for our style analysis,
363 we have gathered some expert-based information about various
364 traditionally perceived style aspects.
365 Three high-level Go players (Alexander Dinerstein 3-pro, Motoki Noguchi
366 7-dan and Vit Brunner 4-dan) have judged the styles of several Go
367 professionals -- we call them \emph{reference players} -- chosen for both
368 being well-known within the community and having a~large number of played games in our collection.
370 This expert-based knowledge allows us to predict styles of unknown players based on
371 the similarity of their pattern vectors, as well as discover correlations between
372 styles and proportions of played patterns.
374 Experts were asked to assign a number to each aspect of each player's style
375 on a scale from 1 to 10. These numbers are interpreted
376 as shown in the table below.
378 \vspace{4mm}
379 \noindent
380 %\begin{table}
381 \begin{center}
382 %\caption{Styles}
383 \begin{tabular}{|c|c|c|}
384 \hline
385 \multicolumn{3}{|c|}{Styles} \\ \hline
386 Style & 1 & 10\\ \hline
387 Territoriality & Moyo & Territorial \\
388 Orthodoxity & Classic & Novel \\
389 Aggressivity & Calm & Fighting \\
390 Thickness & Safe & Shinogi \\ \hline
391 \end{tabular}
392 \end{center}
393 %\end{table}
394 \vspace{4mm}
396 Averaging this expert-based evaluation yields
397 \emph{reference style vector} $\vec s_r$ (of dimension $4$) for each player $r$
398 from the set of \emph{reference players} $R$.
400 %-- each with a \emph{pattern vector} $\vec p_i$ and \emph{style vector} $\vec s_i$.
402 \section{Data Extraction}
403 \label{pattern-vectors}
404 In addition to the explicit expert knowledge, we use the data obtained by...
406 TODO rozvest uvod, nemuze se zacinat jenom As the input...
408 As the input, we assume a~collection of game records\footnote{We
409 use the SGF format (TODO) in our implementation.} organized by player names.
410 We use two collections; the first one is GoGoD Winter 2009 (TODO) containing 42000 (TODO)
411 professional games, dating from the early Go history 1500 years ago to the present.
412 We use this collection for style analysis and detailed correlation analysis
413 of well-known Go professionals.
414 The other source is Go Teaching Ladder reviews (TODO). These include 7600 games
415 of players spanning over all strength levels; we use this collection
416 for finding correlations between moves of players of the same strength rank.
418 In order to generate the required compact description of most frequently played moves,
419 we construct a set of the $n$ most frequently occurring patterns (\emph{top patterns})
420 across all players and games from the database\footnote{We use $n=500$ in our analysis.}.
421 For each player, we then count how many times each of those $n$ patterns was played
422 during all his games and finally assign him a~\emph{pattern vector} $\vec p$ of dimension $n$, with each
423 dimension corresponding to the relative number of occurrences of a given pattern
424 (with respect to the player's most played \emph{top pattern}). Using relative numbers of occurrences ensures that
425 each dimension of the player's \emph{pattern vector} is scaled to the range $[0,1]$ and
426 therefore even players with different numbers of games in the database have comparable \emph{pattern vectors}.
428 \subsection{Pattern Features}
429 TODO sladit aby to navazovalo na predchozi odstavec
431 Of course a big question is how to compose the pattern descriptions.
432 There are some tradeoffs in play --- overly general descriptions carry too little
433 information to discern various player attributes; too specific descriptions
434 gather too few specimens over the games and the differences in vectors are
435 not statistically significant.
437 We have chosen an intuitive and simple approach inspired by pattern features
438 used when computing ELO ratings for candidate patterns in Computer Go
439 play~\cite{ELO}. Each pattern is a~combination of several \emph{pattern features}
440 matched at the position of the played move. We use these features:
442 \begin{itemize}
443 \item board edge distance
444 \item capture move flag
445 \item atari move flag
446 \item atari escape flag
447 \item spatial pattern --- a configuration of stones around the played move,
448 with differentiated color to play; configurations with diameter between
449 2 and 9 are matched\footnote{The diameter determines maximal distance from
450 center in the {\em gridcular} metric: $d(x,y) = |\delta x| + |\delta y| + \max(|\delta x|, |\delta y|)$}
451 \end{itemize}
453 \subsection{Implementation}
455 We have implemented the data extraction by making use of the pattern
456 features matching implementation within the Pachi go-playing program
457 (TODO). We extract information on players by converting the SGF game
458 records to GTP (TODO) stream that feeds Pachi's {\tt patternscan}
459 engine which outputs a~single patternspec per move. We can then gather
460 all encountered patternspecs belonging to a~given player and summarize
461 them; the $\vec p$ vector then consists of normalized counts of
462 the given $n$ most frequent patternspecs.
465 \section{Data Mining}
466 \label{data-mining}
468 To assess the properties of the gathered \emph{pattern vectors} and their influence on playing styles,
469 we have analysed the data by a~few basic data mining techniques.
471 The first two methods rely purely on data gathered from the GoGoD database. Principal component
472 analysis finds orthogonal vector components that have the largest variance. Reversing the process then
473 indicates which patterns correlate with each style. Additionally, PCA can be used as a vector preprocessing
474 for methods that are negatively sensitive to \emph{pattern vector} component correlations.
476 A~second method -- Kohonen's networks -- is based on the theory of self-organizing maps of neurons that
477 compete against each other for representation of the input space. Because neurons in the network are
478 organized in a two-dimensional plane, the trained network virtually spreads vectors to the 2D plane,
479 allowing for simple visualization.
481 In addition to methods operating just upon the set of \emph{pattern vectors}, we have used and compared two methods
482 that approximate an unknown \emph{style vector} $\vec S$ of a player with known \emph{pattern vector} $\vec P$.
484 The first of them is called $k$-Nearest Neighbor (kNN) classification.
485 Simply put, kNN approximates $\vec S$ by the ``average'' of \emph{style vectors} of the $k$ nearest \emph{pattern vectors}.
486 The last method is based on the theory of neural networks -- networks of artificial neurons that are used
487 for their generalization abilities. A neural network can learn correlations between input and output vectors and
488 generalize the ``knowledge'' to unknown vectors sufficiently well.
490 TODO rozdelit na algo/results??
492 \subsection{Principal Component Analysis}
493 \label{pca}% NOTE: renamed -- this was a duplicate of \label{data-mining} used for the enclosing section
494 Principal Component Analysis (\emph{PCA})~\cite{Jolliffe1986} is a~method we use to reduce the dimensions of
495 \emph{pattern vectors} while preserving as much information as possible.
497 In short, PCA is an eigenvalue decomposition of a~covariance matrix of centered \emph{pattern vectors}.
498 It can be thought of as a~mapping $o$ from $n$-dimensional vector space to a~reduced $m$-dimensional vector space.
499 The base of this reduced vector space comprises $m$ eigenvectors of original vectors' covariance matrix.
500 We choose them to be the eigenvectors with the largest eigenvalues.
501 Ordered by decreasing eigenvalues, the eigenvectors form rows of the transformation matrix $W$.
503 Finally, we represent reduced \emph{pattern vectors} as vectors of coefficients of this eigenvector base.
504 For each original \emph{pattern vector} $\vec p_i$, we obtain its new representation $\vec r_i$ as shown
505 in the following equation:
506 \begin{equation}
507 \vec r_i = W \vec p_i
508 \end{equation}
510 The whole process is described in Algorithm~\ref{alg:pca}.
512 \begin{algorithm}
513 \caption{PCA -- Principal Component analysis}
514 \begin{algorithmic}
515 \label{alg:pca}
516 \REQUIRE{$m > 0$, set of players $R$ with \emph{pattern vectors} $p_r$}
517 \STATE $\vec \mu \leftarrow 1/|R| * \sum_{r \in R}{\vec p_r}$
518 \FOR{ $r \in R$}
519 \STATE $\vec p_r \leftarrow \vec p_r - \vec \mu$
520 \ENDFOR
521 \FOR{ $(i,j) \in \{1,... ,n\} \times \{1,... ,n\}$}
522 \STATE $\mathit{Cov}[i,j] \leftarrow 1/|R| * \sum_{r \in R}{\vec p_{ri} * \vec p_{rj}}$
523 \ENDFOR
524 \STATE Compute Eigenvalue Decomposition of $\mathit{Cov}$ matrix
525 \STATE Get $m$ biggest eigenvalues
526 \STATE According eigenvectors ordered by decreasing eigenvalues form rows of matrix $W$
527 \FOR{ $r \in R$}
528 \STATE $\vec r_r\leftarrow W \vec p_r$
529 \ENDFOR
530 \end{algorithmic}
531 \end{algorithm}
533 \subsection{Kohonen Maps}
534 \label{koh}
535 A Kohonen map is a self-organizing network with neurons organized in a two-dimensional plane.
536 Neurons in the map compete for representation of the input vector space. Each neuron $\vec n$ represents a vector
537 and the network is trained so that the neurons that are topologically close tend to represent vectors that
538 are close as well. That is realised as follows. The initially random network is sequentially trained;
539 in each iteration we choose a random training vector $\vec t$ and find the neuron $\vec w$ that is closest
540 to $\vec t$ in the Euclidean metric (we call $\vec w$ the \emph{winner neuron}).
542 We then adapt neurons from the neighbourhood of $\vec w$ employing an equation:
543 \begin{equation}
544 \vec n = \vec n + \alpha * \mathit{Influence}(\vec w, \vec n) * (\vec t - \vec n)
545 \end{equation}
546 where $\alpha$ is a learning parameter, usually decreasing in time. $\mathit{Influence}()$ is a function that forces neurons
547 to spread. Such a function is usually realised using a Mexican hat function or a difference of Gaussians (see \cite{TODO}
548 for details). The state of the network can be evaluated by calculating the mean square difference between each $\vec t \in T$
549 and its corresponding \emph{winner neuron} $\vec w_t$:
550 \begin{equation}
551 \mathit{Error}(N,T) = \sum_{\vec t \in T}{|\vec w_t - \vec t|}
552 \end{equation}
555 \begin{algorithm}
556 \caption{Kohonen maps -- training}
557 \begin{algorithmic}
558 \label{alg:koh}
559 \REQUIRE{Set of training vectors $T$, input dimension $D$}
560 \REQUIRE{max number of iterations $M$, desired error $E$}
561 \STATE $N \leftarrow \{\vec n | \vec n$ random, $\mathit{dim}(\vec n) = D\}$
562 \REPEAT
563 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
564 \STATE $\vec t \leftarrow \mathit{PickRandom}(T)$
565 \FORALL{$\vec n \in N$}
566 \STATE $D[\vec n] \leftarrow \mathit{EuclideanDistance}(\vec n, \vec t)$
567 \ENDFOR
568 \STATE Find $\vec w \in N$ so that $D[\vec w] \leq D[\vec m], \forall \vec m \in N$
569 \FORALL{$\vec n \in \mathit{TopologicalNeighbors}(N, \vec w)$}
570 \STATE $\vec n \leftarrow \vec n + \alpha(It) * \mathit{Influence}(\vec w, \vec n) * ( \vec t - \vec n ) $
571 \ENDFOR
572 \UNTIL{$\mathit{Error}(N, T) < E$ or $ \mathit{It} > M$}
573 \end{algorithmic}
574 \end{algorithm}
577 \subsection{k-nearest Neighbors Classifier}
578 \label{knn}
579 $k$-nearest neighbors is an essential classification technique.
580 We use it to approximate player's \emph{style vector} $\vec S$, assuming that his \emph{pattern vector} $\vec P$ is known.
581 To achieve this, we utilize \emph{reference style vectors} (see section \ref{style-vectors}).
583 The idea is based on the assumption that similarities in players' \emph{pattern vectors}
584 correlate with similarities in players' \emph{style vectors}. We try to approximate $\vec S$
585 as a weighted average of the \emph{style vectors}
586 $\vec s_i$ of the $k$ players with \emph{pattern vectors} $\vec p_i$ closest to $\vec P$.
587 This is illustrated in Algorithm~\ref{alg:knn}.
588 Note that the weight is a function of distance and it is not explicitly defined in Algorithm~\ref{alg:knn}.
589 During our research, an exponentially decreasing weight has proven to be sufficient.
591 \begin{algorithm}
592 \caption{k-Nearest Neighbors}
593 \begin{algorithmic}
594 \label{alg:knn}
595 \REQUIRE{pattern vector $\vec P$, $k > 0$, set of reference players $R$}
596 \FORALL{$r \in R$ }
597 \STATE $D[r] \leftarrow \mathit{EuclideanDistance}(\vec p_r, \vec P)$
598 \ENDFOR
599 \STATE $N \leftarrow \mathit{SelectSmallest}(k, R, D)$
600 \STATE $\vec S \leftarrow \vec 0$
601 \FORALL{$r \in N $}
602 \STATE $\vec S \leftarrow \vec S + \mathit{Weight}(D[r]) * \vec s_r $
603 \ENDFOR
604 \end{algorithmic}
605 \end{algorithm}
607 \subsection{Neural Network Classifier}
608 \label{neural-net}
610 As an alternative to the k-Nearest Neighbors algorithm (Section~\ref{knn}), we have used
611 a classifier based on feed-forward artificial neural networks~\cite{TODO}.
612 Neural networks (NN) are known for their ability to generalize and find correlations and patterns between
613 input and output data. A neural network is an adaptive system and it
614 must undergo a certain training before it can be reasonably used. Basically, we use
615 information for \emph{reference players} (for which both \emph{pattern vectors} and
616 \emph{style vectors} are known) as training data.
618 \subsubsection{Computation and activation of the NN}
619 Technically, a neural network is a network of interconnected computational units called neurons.
620 A feedforward neural network has a layered topology; it usually has one \emph{input layer}, one \emph{output
621 layer} and an arbitrary number of \emph{hidden layers} in between.
623 Each neuron $i$ is connected to all neurons in the previous layer and each connection has its weight $w_{ij}$.
625 The computation proceeds in discrete time steps.
626 In the first step, the \emph{activation} of neurons in the \emph{input layer} is set according to the \emph{input vector}.
627 Then, we iteratively compute the output of each neuron in the next layer until the output layer is reached. The activity of
628 the output layer is then presented as the result.
630 The activation $y_i$ of neuron $i$ from the layer $I$ is computed using the following equation:
631 \begin{equation}
632 y_i = f(\sum_{j \in J}{w_{ij} y_j})
633 \end{equation}
634 where $J$ is the previous layer, while $y_j$ is the activation of neurons from layer $J$. The function $f()$ is
635 called the \emph{activation function} and its purpose is to bound the outputs of neurons. A typical example of an activation
636 function is a sigmoid function.\footnote{The sigmoid function is a special case of the logistic function; it is defined by the formula
637 $\sigma(x)=\frac{1}{1+e^{-(rx+k)}}$, parameters control the growth rate ($r$) and the x-position ($k$).}
639 \subsubsection{Training}
640 The training of the feedforward neural network usually involves some
641 modification of the supervised backpropagation learning algorithm~\cite{TODO}.
642 We use the first-order optimization algorithm called RPROP~\cite{Riedmiller1993}.
644 Because the \emph{reference set} is not usually very large, we have devised a simple method for its extension.
645 This enhancement is based upon adding random linear combinations of \emph{style and pattern vectors} to the training set.
647 As outlined above, the training set consists of pairs of input vectors (\emph{pattern vectors}) and
648 desired output vectors (\emph{style vectors}). The training set $T$ is then enlarged by adding linear combinations:
649 \begin{equation}
650 T_\mathit{base} = \{(\vec p_r, \vec s_r) | r \in R\}
651 \end{equation}
652 \begin{equation}
653 T_\mathit{ext} = \{(\vec p, \vec s) | \exists D \subseteq R : \vec p = \sum_{d \in D}{g_d \vec p_d}, \vec s = \sum_{d \in D}{g_d \vec s_d}\}
654 \end{equation}
655 TODO: incorporate $g_d$ directly into the equation?
656 where $g_d, d \in D$ are random coefficients, so that $\sum_{d \in D}{g_d} = 1$. The training set
657 is then constructed as:
658 \begin{equation}
659 T = T_\mathit{base} \cup \mathit{SomeFiniteSubset}(T_\mathit{ext})
660 \end{equation}
662 The network is trained as shown in Algorithm~\ref{alg:tnn}.
664 \begin{algorithm}
665 \caption{Training Neural Network}
666 \begin{algorithmic}
667 \label{alg:tnn}
668 \REQUIRE{Train set $T$, desired error $e$, max iterations $M$}
669 \STATE $N \leftarrow \mathit{RandomlyInitializedNetwork}()$
670 \STATE $\mathit{It} \leftarrow 0$
671 \REPEAT
672 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
673 \STATE $\Delta \vec w \leftarrow \vec 0$
674 \STATE $\mathit{TotalError} \leftarrow 0$
675 %\FORALL{$(\overrightarrow{Input}, \overrightarrow{DesiredOutput}) \in T$}
676 %\STATE $\overrightarrow{Output} \leftarrow Result(N, \overrightarrow{Input})$
677 %\STATE $E \leftarrow |\overrightarrow{DesiredOutput} - \overrightarrow{Output}|$
678 \FORALL{$(\mathit{Input}, \mathit{DesiredOutput}) \in T$}
679 \STATE $\mathit{Output} \leftarrow \mathit{Result}(N, \mathit{Input})$
680 \STATE $\mathit{Error} \leftarrow |\mathit{DesiredOutput} - \mathit{Output}|$
681 \STATE $\Delta \vec w \leftarrow \Delta \vec w + \mathit{WeightUpdate}(N,\mathit{Error})$
682 \STATE $\mathit{TotalError} \leftarrow \mathit{TotalError} + \mathit{Error}$
683 \ENDFOR
684 \STATE $N \leftarrow \mathit{ModifyWeights}(N, \Delta \vec w)$
685 \UNTIL{$\mathit{TotalError} < e$ or $ \mathit{It} > M$}
686 \end{algorithmic}
687 \end{algorithm}
690 \subsubsection{Architecture details}
691 TODO num layers, num neurons, ..
694 \subsection{Implementation}
696 We have implemented the data mining methods as an open-source project ``gostyle'' \cite{TODO},
697 licensed under GNU GPL.
698 PCA: In our implementation, we use a~library called MDP \cite{MDP}.
699 TODO libfann
702 \section{Strength Estimation Analysis}
704 PCA analysis yielded X, chi-square test blabla...
706 We then tried to apply the NN classifier with linear output function on the dataset
707 and that yielded Y (see Fig.~Z), with MSE abcd.
710 \section{Style Components Analysis}
712 PCA analysis yielded X, chi-square test...
714 We then tried to apply the NN classifier with linear output function on the dataset
715 and that yielded Y (see Fig.~Z), with MSE abcd.
718 \section{Proposed Applications}
723 % An example of a floating figure using the graphicx package.
724 % Note that \label must occur AFTER (or within) \caption.
725 % For figures, \caption should occur after the \includegraphics.
726 % Note that IEEEtran v1.7 and later has special internal code that
727 % is designed to preserve the operation of \label within \caption
728 % even when the captionsoff option is in effect. However, because
729 % of issues like this, it may be the safest practice to put all your
730 % \label just after \caption rather than within \caption{}.
732 % Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
733 % option should be used if it is desired that the figures are to be
734 % displayed while in draft mode.
736 %\begin{figure}[!t]
737 %\centering
738 %\includegraphics[width=2.5in]{myfigure}
739 % where an .eps filename suffix will be assumed under latex,
740 % and a .pdf suffix will be assumed for pdflatex; or what has been declared
741 % via \DeclareGraphicsExtensions.
742 %\caption{Simulation Results}
743 %\label{fig_sim}
744 %\end{figure}
746 % Note that IEEE typically puts floats only at the top, even when this
747 % results in a large percentage of a column being occupied by floats.
750 % An example of a double column floating figure using two subfigures.
751 % (The subfig.sty package must be loaded for this to work.)
752 % The subfigure \label commands are set within each subfloat command, the
753 % \label for the overall figure must come after \caption.
754 % \hfil must be used as a separator to get equal spacing.
755 % The subfigure.sty package works much the same way, except \subfigure is
756 % used instead of \subfloat.
758 %\begin{figure*}[!t]
759 %\centerline{\subfloat[Case I]{\includegraphics[width=2.5in]{subfigcase1}%
760 %\label{fig_first_case}}
761 %\hfil
762 %\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}%
763 %\label{fig_second_case}}}
764 %\caption{Simulation results}
765 %\label{fig_sim}
766 %\end{figure*}
768 % Note that often IEEE papers with subfigures do not employ subfigure
769 % captions (using the optional argument to \subfloat), but instead will
770 % reference/describe all of them (a), (b), etc., within the main caption.
773 % An example of a floating table. Note that, for IEEE style tables, the
774 % \caption command should come BEFORE the table. Table text will default to
775 % \footnotesize as IEEE normally uses this smaller font for tables.
776 % The \label must come after \caption as always.
778 %\begin{table}[!t]
779 %% increase table row spacing, adjust to taste
780 %\renewcommand{\arraystretch}{1.3}
781 % if using array.sty, it might be a good idea to tweak the value of
782 % \extrarowheight as needed to properly center the text within the cells
783 %\caption{An Example of a Table}
784 %\label{table_example}
785 %\centering
786 %% Some packages, such as MDW tools, offer better commands for making tables
787 %% than the plain LaTeX2e tabular which is used here.
788 %\begin{tabular}{|c||c|}
789 %\hline
790 %One & Two\\
791 %\hline
792 %Three & Four\\
793 %\hline
794 %\end{tabular}
795 %\end{table}
798 % Note that IEEE does not put floats in the very first column - or typically
799 % anywhere on the first page for that matter. Also, in-text middle ("here")
800 % positioning is not used. Most IEEE journals use top floats exclusively.
801 % Note that, LaTeX2e, unlike IEEE journals, places footnotes above bottom
802 % floats. This can be corrected via the \fnbelowfloat command of the
803 % stfloats package.
807 \section{Conclusion}
808 The conclusion goes here.
809 We have shown brm and proposed brm.
811 Since we are not aware of any previous research on this topic and we
812 are limited by space and time constraints, plenty of research remains
813 to be done. There is plenty of room for further research in all parts
814 of our analysis --- different methods of generating the $\vec p$ vectors
815 can be explored; other data mining methods could be tried.
816 It can be argued that many players adjust their style by game conditions
817 (Go development era, handicap, komi and color, time limits, opponent)
818 or styles might express differently in various game stages.
819 Finally, more professional players could be consulted on the findings
820 and for style scales calibration.
822 TODO: Future research --- Sparse PCA
827 % if have a single appendix:
828 %\appendix[Proof of the Zonklar Equations]
829 % or
830 %\appendix % for no appendix heading
831 % do not use \section anymore after \appendix, only \section*
832 % is possibly needed
834 % use appendices with more than one appendix
835 % then use \section to start each appendix
836 % you must declare a \section before using any
837 % \subsection or using \label (\appendices by itself
838 % starts a section numbered zero.)
842 %\appendices
843 %\section{Proof of the First Zonklar Equation}
844 %Appendix one text goes here.
846 %% you can choose not to have a title for an appendix
847 %% if you want by leaving the argument blank
848 %\section{}
849 %Appendix two text goes here.
852 % use section* for acknowledgement
853 \section*{Acknowledgment}
854 \label{acknowledgement}
857 We would like to thank X for reviewing our paper.
858 We appreciate helpful comments on our general methodology
859 by John Fairbairn, T. M. Hall, Robert Jasiek
860 and several GoDiscussions.com users~\cite{GoDiscThread}.
861 Finally, we are very grateful for ranking of go styles of selected professionals
862 by Alexander Dinerstein 3-pro, Motoki Noguchi 7-dan and Vit Brunner 4-dan.
865 % Can use something like this to put references on a page
866 % by themselves when using endfloat and the captionsoff option.
867 \ifCLASSOPTIONcaptionsoff
868 \newpage
873 % trigger a \newpage just before the given reference
874 % number - used to balance the columns on the last page
875 % adjust value as needed - may need to be readjusted if
876 % the document is modified later
877 %\IEEEtriggeratref{8}
878 % The "triggered" command can be changed if desired:
879 %\IEEEtriggercmd{\enlargethispage{-5in}}
881 % references section
883 % can use a bibliography generated by BibTeX as a .bbl file
884 % BibTeX documentation can be easily obtained at:
885 % http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
886 % The IEEEtran BibTeX style support page is at:
887 % http://www.michaelshell.org/tex/ieeetran/bibtex/
888 \bibliographystyle{IEEEtran}
889 % argument is your BibTeX string definitions and bibliography database(s)
890 \bibliography{gostyle}
892 % <OR> manually copy in the resultant .bbl file
893 % set second argument of \begin to the number of references
894 % (used to reserve space for the reference number labels box)
895 %\begin{thebibliography}{1}
897 %\bibitem{MasterMCTS}
899 %\end{thebibliography}
901 % biography section
903 % If you have an EPS/PDF photo (graphicx package needed) extra braces are
904 % needed around the contents of the optional argument to biography to prevent
905 % the LaTeX parser from getting confused when it sees the complicated
906 % \includegraphics command within an optional argument. (You could create
907 % your own custom macro containing the \includegraphics command to make things
908 % simpler here.)
909 %\begin{biography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{mshell}}]{Michael Shell}
910 % or if you just want to reserve a space for a photo:
912 \begin{IEEEbiography}{Michael Shell}
913 Biography text here.
914 \end{IEEEbiography}
916 % if you will not have a photo at all:
917 \begin{IEEEbiographynophoto}{John Doe}
918 Biography text here.
919 \end{IEEEbiographynophoto}
921 % insert where needed to balance the two columns on the last page with
922 % biographies
923 %\newpage
925 \begin{IEEEbiographynophoto}{Jane Doe}
926 Biography text here.
927 \end{IEEEbiographynophoto}
929 % You can push biographies down or up by placing
930 % a \vfill before or after them. The appropriate
931 % use of \vfill depends on what kind of text is
932 % on the last page and whether or not the columns
933 % are being equalized.
935 %\vfill
937 % Can be used to pull up biographies so that the bottom of the last one
938 % is flush with the other column.
939 %\enlargethispage{-5in}
943 % that's all folks
944 \end{document}