1 \documentclass[journal]{IEEEtran}
3 \usepackage{cite}
4 % cite.sty was written by Donald Arseneau
5 % V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
6 % \cite{} output to follow that of IEEE. Loading the cite package will
7 % result in citation numbers being automatically sorted and properly
8 % "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
9 % cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
10 % \cite will automatically add leading space, if needed. Use cite.sty's
11 % noadjust option (cite.sty V3.8 and later) if you want to turn this off.
12 % cite.sty is already installed on most LaTeX systems. Be sure and use
13 % version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
14 % not currently provide for hyperlinked citations.
15 % The latest version can be obtained at:
16 % http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
17 % The documentation is contained in the cite.sty file itself.
20 % *** GRAPHICS RELATED PACKAGES ***
22 \ifCLASSINFOpdf
23 % \usepackage[pdftex]{graphicx}
24 % declare the path(s) where your graphic files are
25 % \graphicspath{{../pdf/}{../jpeg/}}
26 % and their extensions so you won't have to specify these with
27 % every instance of \includegraphics
28 % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
29 \else
30 % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
31 % will default to the driver specified in the system graphics.cfg if no
32 % driver is specified.
33 % \usepackage[dvips]{graphicx}
34 % declare the path(s) where your graphic files are
35 % \graphicspath{{../eps/}}
36 % and their extensions so you won't have to specify these with
37 % every instance of \includegraphics
38 % \DeclareGraphicsExtensions{.eps}
39 \fi
41 \usepackage{algorithm}
42 \usepackage{algorithmic}
43 %\usepackage{algpseudocode}
% WICKED: neither of them works???
45 % algorithmic.sty can be obtained at:
46 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
47 % There is also a support site at:
48 % http://algorithms.berlios.de/index.html
49 % Also of interest may be the (relatively newer and more customizable)
50 % algorithmicx.sty package by Szasz Janos:
51 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/
53 % *** ALIGNMENT PACKAGES ***
55 %\usepackage{array}
56 % http://www.ctan.org/tex-archive/macros/latex/required/tools/
59 \usepackage{amsmath}
60 %\usepackage{mdwtab}
61 % http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/
64 % IEEEtran contains the IEEEeqnarray family of commands that can be used to
65 % generate multiline equations as well as matrices, tables, etc., of high
66 % quality.
68 %\usepackage{eqparbox}
69 % Also of notable interest is Scott Pakin's eqparbox package for creating
70 % (automatically sized) equal width boxes - aka "natural width parboxes".
71 % Available at:
72 % http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/
76 % *** SUBFIGURE PACKAGES ***
77 %\usepackage[tight,footnotesize]{subfigure}
78 % subfigure.sty was written by Steven Douglas Cochran. This package makes it
79 % easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
80 % work, it is a good idea to load it with the tight package option to reduce
81 % the amount of white space around the subfigures. subfigure.sty is already
82 % installed on most LaTeX systems. The latest version and documentation can
83 % be obtained at:
84 % http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
% subfigure.sty has been superseded by subfig.sty.
89 %\usepackage[caption=false]{caption}
90 %\usepackage[font=footnotesize]{subfig}
91 % subfig.sty, also written by Steven Douglas Cochran, is the modern
92 % replacement for subfigure.sty. However, subfig.sty requires and
93 % automatically loads Axel Sommerfeldt's caption.sty which will override
94 % IEEEtran.cls handling of captions and this will result in nonIEEE style
95 % figure/table captions. To prevent this problem, be sure and preload
% caption.sty with its "caption=false" package option. This will preserve
% IEEEtran.cls handling of captions. Version 1.3 (2005/06/28) and later
98 % (recommended due to many improvements over 1.2) of subfig.sty supports
99 % the caption=false option directly:
100 %\usepackage[caption=false,font=footnotesize]{subfig}
102 % The latest version and documentation can be obtained at:
103 % http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
104 % The latest version and documentation of caption.sty can be obtained at:
105 % http://www.ctan.org/tex-archive/macros/latex/contrib/caption/
109 % *** FLOAT PACKAGES ***
111 %\usepackage{fixltx2e}
112 % fixltx2e, the successor to the earlier fix2col.sty, was written by
113 % Frank Mittelbach and David Carlisle. This package corrects a few problems
114 % in the LaTeX2e kernel, the most notable of which is that in current
115 % LaTeX2e releases, the ordering of single and double column floats is not
116 % guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
117 % single column figure to be placed prior to an earlier double column
118 % figure. The latest version and documentation can be found at:
119 % http://www.ctan.org/tex-archive/macros/latex/base/
123 %\usepackage{stfloats}
124 % stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
125 % the ability to do double column floats at the bottom of the page as well
126 % as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
127 % LaTeX2e). It also provides a command:
128 %\fnbelowfloat
129 % to enable the placement of footnotes below bottom floats (the standard
130 % LaTeX2e kernel puts them above bottom floats). This is an invasive package
131 % which rewrites many portions of the LaTeX2e float routines. It may not work
132 % with other packages that modify the LaTeX2e float routines. The latest
133 % version and documentation can be obtained at:
134 % http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
135 % Documentation is contained in the stfloats.sty comments as well as in the
136 % presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
137 % does not allow \baselineskip to stretch. Authors submitting work to the
138 % IEEE should note that IEEE rarely uses double column equations and
139 % that authors should try to avoid such use. Do not be tempted to use the
140 % cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
141 % not format its papers in such ways.
144 %\ifCLASSOPTIONcaptionsoff
145 % \usepackage[nomarkers]{endfloat}
146 % \let\MYoriglatexcaption\caption
147 % \renewcommand{\caption}[2][\relax]{\MYoriglatexcaption[#2]{#2}}
148 %\fi
149 % endfloat.sty was written by James Darrell McCauley and Jeff Goldberg.
150 % This package may be useful when used in conjunction with IEEEtran.cls'
151 % captionsoff option. Some IEEE journals/societies require that submissions
152 % have lists of figures/tables at the end of the paper and that
153 % figures/tables without any captions are placed on a page by themselves at
154 % the end of the document. If needed, the draftcls IEEEtran class option or
155 % \CLASSINPUTbaselinestretch interface can be used to increase the line
156 % spacing as well. Be sure and use the nomarkers option of endfloat to
157 % prevent endfloat from "marking" where the figures would have been placed
158 % in the text. The two hack lines of code above are a slight modification of
% that suggested in the endfloat docs (section 8.3.1) to ensure that
160 % the full captions always appear in the list of figures/tables - even if
161 % the user used the short optional argument of \caption[]{}.
162 % IEEE papers do not typically make use of \caption[]'s optional argument,
163 % so this should not be an issue. A similar trick can be used to disable
164 % captions of packages such as subfig.sty that lack options to turn off
165 % the subcaptions:
166 % For subfig.sty:
167 % \let\MYorigsubfloat\subfloat
168 % \renewcommand{\subfloat}[2][\relax]{\MYorigsubfloat[]{#2}}
169 % For subfigure.sty:
170 % \let\MYorigsubfigure\subfigure
171 % \renewcommand{\subfigure}[2][\relax]{\MYorigsubfigure[]{#2}}
172 % However, the above trick will not work if both optional arguments of
173 % the \subfloat/subfig command are used. Furthermore, there needs to be a
174 % description of each subfigure *somewhere* and endfloat does not add
175 % subfigure captions to its list of figures. Thus, the best approach is to
176 % avoid the use of subfigure captions (many IEEE journals avoid them anyway)
177 % and instead reference/explain all the subfigures within the main caption.
% The latest version of endfloat.sty and its documentation can be obtained at:
179 % http://www.ctan.org/tex-archive/macros/latex/contrib/endfloat/
181 % The IEEEtran \ifCLASSOPTIONcaptionsoff conditional can also be used
182 % later in the document, say, to conditionally put the References on a
183 % page by themselves.
185 % *** PDF, URL AND HYPERLINK PACKAGES ***
187 %\usepackage{url}
188 % url.sty was written by Donald Arseneau. It provides better support for
189 % handling and breaking URLs. url.sty is already installed on most LaTeX
190 % systems. The latest version can be obtained at:
191 % http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
192 % Read the url.sty source comments for usage information. Basically,
193 % \url{my_url_here}.
196 % *** Do not adjust lengths that control margins, column widths, etc. ***
197 % *** Do not use packages that alter fonts (such as pslatex). ***
198 % There should be no need to do such things with IEEEtran.cls V1.6 and later.
199 % (Unless specifically asked to do so by the journal or conference you plan
200 % to submit to, of course. )
202 % correct bad hyphenation here
203 \hyphenation{op-tical net-works semi-conduc-tor}
206 \begin{document}
208 % paper title
209 % can use linebreaks \\ within to get better formatting as desired
210 \title{On Pattern Feature Trends in Large Go Game Corpus}
212 % use \thanks{} to gain access to the first footnote area
213 % a separate \thanks must be used for each paragraph as LaTeX2e's \thanks
214 % was not built to handle multiple paragraphs
215 \author{Petr~Baudis,~Josef~Moudrik% <-this % stops a space
\thanks{P. Baudis is a student at the Faculty of Math and Physics, Charles University, Prague, CZ, and also does some of his Computer Go research as an employee of SUSE Labs Prague, Novell CZ.}% <-this % stops a space
\thanks{J. Moudrik is a student at the Faculty of Math and Physics, Charles University, Prague, CZ.}}
219 % note the % following the last \IEEEmembership and also \thanks -
220 % these prevent an unwanted space from occurring between the last author name
221 % and the end of the author line. i.e., if you had this:
223 % \author{....lastname \thanks{...} \thanks{...} }
224 % ^------------^------------^----Do not want these spaces!
226 % a space would be appended to the last name and could cause every name on that
227 % line to be shifted left slightly. This is one of those "LaTeX things". For
228 % instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
229 % "AB" then you have to do: "\textbf{A}\textbf{B}"
230 % \thanks is no different in this regard, so shield the last } of each \thanks
231 % that ends a line with a % and do not let a space in before the next \thanks.
232 % Spaces after \IEEEmembership other than the last one are OK (and needed) as
233 % you are supposed to have spaces between the names. For what it is worth,
234 % this is a minor point as most people would not even notice if the said evil
235 % space somehow managed to creep in.
238 % The paper headers
239 \markboth{Transactions on Computational Intelligence and AI in Games}%
240 {On Pattern Feature Trends in Large Go Game Corpus}
241 % The only time the second header will appear is for the odd numbered pages
242 % after the title page when using the twoside option.
244 % *** Note that you probably will NOT want to include the author's ***
245 % *** name in the headers of peer review papers. ***
246 % You can use \ifCLASSOPTIONpeerreview for conditional compilation here if
247 % you desire.
252 % If you want to put a publisher's ID mark on the page you can do it like
253 % this:
254 %\IEEEpubid{0000--0000/00\$00.00~\copyright~2007 IEEE}
255 % Remember, if you use this you must call \IEEEpubidadjcol in the second
256 % column for its text to clear the IEEEpubid mark.
260 % use for special paper notices
261 %\IEEEspecialpapernotice{(Invited Paper)}
266 % make the title area
267 \maketitle
270 \begin{abstract}
271 %\boldmath
We process a~large corpus of game records of the board game of Go and
propose a~way to extract per-player summary information on played moves.
We then apply several basic data-mining methods to this summary
information to identify the features that best differentiate players,
and discuss their correspondence with traditional
Go knowledge. We show mappings of the features to player attributes
like playing strength or informally perceived ``playing style'' (such as
territoriality or aggressivity), and propose applications including
seeding real-world ranks of internet players, aiding in Go study, and
contributing to the discussion within Go theory on the scope of ``playing
style''.
285 \end{abstract}
286 % IEEEtran.cls defaults to using nonbold math in the Abstract.
287 % This preserves the distinction between vectors and scalars. However,
288 % if the journal you are submitting to favors bold math in the abstract,
289 % then you can use LaTeX's standard command \boldmath at the very start
290 % of the abstract to achieve this. Many IEEE journals frown on math
291 % in the abstract anyway.
293 % Note that keywords are not normally used for peerreview papers.
294 \begin{IEEEkeywords}
board games, Go, data mining, player strength, playing style
296 \end{IEEEkeywords}
303 % For peer review papers, you can put extra information on the cover
304 % page as needed:
305 % \ifCLASSOPTIONpeerreview
306 % \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
307 % \fi
309 % For peerreview papers, this IEEEtran command inserts a page break and
310 % creates the second title. It will be ignored for other modes.
311 \IEEEpeerreviewmaketitle
315 \section{Introduction}
316 % The very first letter is a 2 line initial drop letter followed
317 % by the rest of the first word in caps.
319 % form to use if the first word consists of a single letter:
320 % \IEEEPARstart{A}{demo} file is ....
322 % form to use if you need the single drop letter followed by
323 % normal text (unknown if ever used by IEEE):
324 % \IEEEPARstart{A}{}demo file is ....
326 % Some journals put the first two words in caps:
327 % \IEEEPARstart{T}{his demo} file is ....
329 % Here we have the typical use of a "T" for an initial drop letter
330 % and "HIS" in caps to complete the first word.
331 \IEEEPARstart{T}{he} field of Computer Go usually focuses on the problem
332 of creating a~program to play the game, finding the best move from a~given
333 board position. We will make use of one method developed in the course
334 of such research and apply it to the analysis of existing game records
335 with the aim of helping humans to play the game better instead.
337 Go is a~two-player full-information board game played
338 on a~square grid (usually $19\times19$ lines) with black and white
339 stones; the goal of the game is to surround the most territory and
340 capture enemy stones. We assume basic familiarity with the game.
342 Many Go players are eager to play using computers (usually over
343 the internet) and review games played by others on computers as well.
344 This means that large amounts of game records are collected and digitally
345 stored, enabling easy processing of such collections. However, so far
little has been done with the available data --- we are aware
only of uses for simple win/loss statistics (TODO: KGS Stats, KGS Analytics,
Pro Go Rating) and ``next move'' statistics on a~specific position (TODO:
Kombilo, Moyo Go Studio).
351 We present a~more in-depth approach --- from all played moves, we devise
352 a~compact evaluation of each player. We then explore correlations between
353 evaluations of various players in light of externally given information.
This way, we can discover similarities between the move characteristics of
players with the same playing strength, or discuss the meaning of the
``playing style'' concept on the assumption that similar playing styles
should yield similar move characteristics.
360 \section{Expert-based knowledge}
361 \label{style-vectors}
362 In order to provide a reference frame for our style analysis,
363 we have gathered some expert-based information about various
364 traditionally perceived style aspects.
Three high-level Go players (Alexander Dinerstein 3-pro, Motoki Noguchi
7-dan and Vit Brunner 4-dan) have judged the styles of several Go
professionals -- we call them \emph{reference players} -- chosen both for
being well-known within the community and for having a large number of games in our collection.
370 This expert-based knowledge allows us to predict styles of unknown players based on
371 the similarity of their pattern vectors, as well as discover correlations between
372 styles and proportions of played patterns.
The experts were asked to assign a number on a scale from 1 to 10
to each aspect of a player's style. The scales are interpreted
as shown in the table below.
378 \vspace{4mm}
379 \noindent
380 %\begin{table}
381 \begin{center}
382 %\caption{Styles}
383 \begin{tabular}{|c|c|c|}
384 \hline
385 \multicolumn{3}{|c|}{Styles} \\ \hline
386 Style & 1 & 10\\ \hline
387 Territoriality & Moyo & Territorial \\
388 Orthodoxity & Classic & Novel \\
Aggressivity & Calm & Fighting \\
390 Thickness & Safe & Shinogi \\ \hline
391 \end{tabular}
392 \end{center}
393 %\end{table}
394 \vspace{4mm}
Averaging this expert-based evaluation yields a
\emph{reference style vector} $\vec s_r$ (of dimension $4$) for each player $r$
from the set of \emph{reference players} $R$.
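For concreteness, if $E$ denotes the set of experts and $\vec s_r^{\,(e)}$ the
vector of ratings given to player $r$ by expert $e$ (this notation is
introduced here only for this formula), the averaging takes the form
\begin{equation}
\vec s_r = \frac{1}{|E|} \sum_{e \in E} \vec s_r^{\,(e)}.
\end{equation}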
400 %-- each with a \emph{pattern vector} $\vec p_i$ and \emph{style vector} $\vec s_i$.
402 \section{Data Extraction}
403 \label{pattern-vectors}
In addition to the explicit expert knowledge, we use data extracted from the game records themselves.
TODO: expand this introduction; it should not start directly with ``As the input...''
As the input, we assume a~collection of game records\footnote{We
use the SGF format (TODO) in our implementation.} organized by player names.
We use two collections; the first one is GoGoD Winter 2009 (TODO), containing 42000 (TODO)
professional games dating from the early history of Go 1500 years ago to the present.
We use this collection for style analysis and detailed correlation analysis
of well-known Go professionals.
The other source is Go Teaching Ladder reviews (TODO). These include 7600 games
of players spanning all strength levels; we use this collection
for finding correlations between moves of players of the same strength rank.
In order to generate the required compact description of most frequently played moves,
we construct a set of the $n$ most frequently occurring patterns (\emph{top patterns})
across all players and games in the database\footnote{We use $n=500$ in our analysis.}.
For each player, we then count how many times each of these $n$ patterns was played
in his games and finally assign him a~{\em pattern vector} $\vec p$ of dimension $n$, with each
dimension corresponding to the relative number of occurrences of a given pattern
(with respect to the player's most played \emph{top pattern}). Using relative numbers of occurrences ensures that
each dimension of a player's \emph{pattern vector} is scaled to the range $[0,1]$ and
therefore even players with different numbers of games in the database have comparable \emph{pattern vectors}.
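As an illustration, the following sketch shows how raw per-player pattern
counts could be turned into such a \emph{pattern vector} (Python; the variable
names and the input format are assumptions made for illustration, not a
description of our actual implementation):

\begin{verbatim}
# Minimal sketch: build a normalized pattern vector
# from raw counts. `top_patterns` is the list of the
# n most frequent patterns overall; `player_counts`
# maps a pattern to the number of times the player
# played it.
def pattern_vector(top_patterns, player_counts):
    counts = [player_counts.get(p, 0)
              for p in top_patterns]
    most_played = max(counts) or 1   # avoid /0
    # Each dimension is relative to the player's
    # most played top pattern, so it lies in [0, 1].
    return [c / most_played for c in counts]
\end{verbatim}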
\subsection{Pattern Features}
We need to define how to compose the patterns we use to describe moves.
There are some trade-offs in play: overly general descriptions carry too little
information to discern various player attributes, while overly specific descriptions
gather too few specimens over the game sample and the vector differences are
not statistically significant.
We have chosen an intuitive and simple approach inspired by pattern features
used when computing Elo ratings of candidate patterns in Computer Go
play~\cite{ELO}. Each pattern is a~combination of several {\em pattern features}
(name-value pairs) matched at the position of the played move.
We use these features:
441 \begin{itemize}
442 \item capture move flag
443 \item atari move flag
444 \item atari escape flag
\item contiguity-to-last flag --- whether the move has been played in one of the 8 neighbors of the last move
446 \item contiguity-to-second-last flag
447 \item board edge distance --- only up to distance 4
448 \item spatial pattern --- configuration of stones around the played move
449 \end{itemize}
The spatial patterns are normalized (using a dictionary) to be always
black-to-play and to be invariant under translation and rotation.
Configurations with diameter between 2 and 9 in the gridcular metric%
\footnote{The {\em gridcular} metric
$d(x,y) = |\delta x| + |\delta y| + \max(|\delta x|, |\delta y|)$ defines
a circle-like structure on the square grid of the Go board. \cite{SpatPat} }
are matched.
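To illustrate the gridcular metric, a small sketch of the distance function and
of enumerating the points within a given diameter follows (Python; purely
illustrative, not taken from our implementation):

\begin{verbatim}
# d(x, y) = |dx| + |dy| + max(|dx|, |dy|)
def gridcular_distance(a, b):
    dx, dy = abs(a[0] - b[0]), abs(a[1] - b[1])
    return dx + dy + max(dx, dy)

# All points whose gridcular distance from `center`
# is at most `diam`.
def points_within(center, diam):
    cx, cy = center
    return [(cx + i, cy + j)
            for i in range(-diam, diam + 1)
            for j in range(-diam, diam + 1)
            if gridcular_distance(
                center, (cx + i, cy + j)) <= diam]
\end{verbatim}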
\subsection{Implementation}

We have implemented the data extraction by making use of the pattern
features matching implementation within the Pachi Go-playing program
(TODO). We extract information on players by converting the SGF game
records to a GTP (TODO) stream that feeds Pachi's {\tt patternscan}
engine, which outputs a~single patternspec per move. We then gather
all encountered patternspecs belonging to a~given player and summarize
them; the $\vec p$ vector then consists of normalized counts of
the $n$ most frequent patternspecs.
471 \section{Data Mining}
472 \label{data-mining}
To assess the properties of the gathered \emph{pattern vectors} and their relation to playing styles,
we have analysed the data with a~few basic data mining techniques.

The first two methods rely purely on the data gathered from the GoGoD database. Principal component
analysis finds orthogonal vector components that have the largest variance. Reversing the process then
indicates which patterns correlate with each style. Additionally, PCA can be used as a preprocessing step
for methods that are sensitive to correlations between \emph{pattern vector} components.

The second method -- Kohonen maps -- is based on the theory of self-organizing maps of neurons that
compete against each other for representation of the input space. Because neurons in the network are
organized in a two-dimensional plane, the trained network effectively spreads the vectors over a 2D plane,
allowing for simple visualization.

In addition to the methods operating just upon the set of \emph{pattern vectors}, we have used and compared two methods
that approximate an unknown \emph{style vector} $\vec S$ of a player with a known \emph{pattern vector} $\vec P$.

The first of them is $k$-Nearest Neighbor (kNN) classification.
Simply put, kNN approximates $\vec S$ by the ``average'' of the \emph{style vectors} of the $k$ nearest \emph{pattern vectors}.
The last method is based on the theory of neural networks -- networks of artificial neurons that are used
for their generalization abilities. A neural network can learn correlations and patterns between input and output vectors and
generalize this ``knowledge'' to unknown vectors sufficiently well.
TODO: split this into algorithms/results sections??
\subsection{Principal Component Analysis}
\label{pca}
Principal Component Analysis (PCA) \cite{Jolliffe1986} is a~method we use to reduce the dimension of the
\emph{pattern vectors} while preserving as much information as possible.

Briefly, PCA is an eigenvalue decomposition of the covariance matrix of the centered \emph{pattern vectors}.
It can be thought of as a~mapping from the $n$-dimensional vector space to a~reduced $m$-dimensional vector space.
The basis of this reduced vector space comprises $m$ eigenvectors of the original vectors' covariance matrix;
we choose the eigenvectors with the largest eigenvalues.
Ordered by decreasing eigenvalues, the eigenvectors form the rows of the transformation matrix $W$.

Finally, we represent each reduced \emph{pattern vector} as a vector of coefficients with respect to this eigenvector basis.
For each original \emph{pattern vector} $\vec p_i$, we obtain its new representation $\vec r_i$ as shown
in the following equation:
\begin{equation}
\vec r_i = W \cdot \vec p_i
\end{equation}

The whole process is described in Algorithm \ref{alg:pca}.
518 \begin{algorithm}
519 \caption{PCA -- Principal Component analysis}
520 \begin{algorithmic}
521 \label{alg:pca}
522 \REQUIRE{$m > 0$, set of players $R$ with \emph{pattern vectors} $p_r$}
523 \STATE $\vec \mu \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_r}$
524 \FOR{ $r \in R$}
525 \STATE $\vec p_r \leftarrow \vec p_r - \vec \mu$
526 \ENDFOR
527 \FOR{ $(i,j) \in \{1,... ,n\} \times \{1,... ,n\}$}
528 \STATE $\mathit{Cov}[i,j] \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_{ri} \cdot \vec p_{rj}}$
529 \ENDFOR
\STATE Compute the eigenvalue decomposition of the $\mathit{Cov}$ matrix
\STATE Take the $m$ largest eigenvalues
\STATE The corresponding eigenvectors, ordered by decreasing eigenvalue, form the rows of matrix $W$
533 \FOR{ $r \in R$}
534 \STATE $\vec r_r\leftarrow W \vec p_r$
535 \ENDFOR
536 \end{algorithmic}
537 \end{algorithm}
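For reference, the same computation can be written compactly with NumPy (an
illustrative sketch mirroring Algorithm \ref{alg:pca}, not our actual MDP-based
implementation):

\begin{verbatim}
import numpy as np

def pca_reduce(P, m):
    # P holds one pattern vector per row; center,
    # build the covariance matrix and keep the m
    # eigenvectors with the largest eigenvalues.
    P = P - P.mean(axis=0)
    cov = np.cov(P, rowvar=False)
    eigval, eigvec = np.linalg.eigh(cov)
    order = np.argsort(eigval)[::-1][:m]
    W = eigvec[:, order].T  # rows = eigenvectors
    return P @ W.T, W       # reduced r_i = W p_i
\end{verbatim}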
539 \subsection{Kohonen Maps}
540 \label{koh}
A Kohonen map is a self-organizing network with neurons organized in a two-dimensional plane.
Neurons in the map compete for representation of the input vector space. Each neuron $\vec n$ represents a vector,
and the network is trained so that neurons that are topologically close tend to represent vectors that
are close as well. This is realized as follows: an initially random network is trained sequentially;
in each iteration we choose a random training vector $\vec t$ and find the neuron $\vec w$ that is closest
to $\vec t$ in the Euclidean metric (we call $\vec w$ the \emph{winner neuron}).

We then adapt neurons from the neighbourhood of $\vec w$ using the equation:
549 \begin{equation}
550 \vec n = \vec n + \alpha \cdot \mathit{Influence}(\vec w, \vec n) \cdot (\vec t - \vec n)
551 \end{equation}
where $\alpha$ is a learning parameter, usually decreasing in time. $\mathit{Influence}()$ is a function that forces neurons
to spread out. Such a function is usually realized using a Mexican hat function or a difference of Gaussians (see \cite{TODO}
for details). The state of the network can be evaluated by summing, over each $\vec t \in T$,
the distance to its corresponding \emph{winner neuron} $\vec w_t$:
556 \begin{equation}
557 \mathit{Error}(N,T) = \sum_{\vec t \in T}{|\vec w_t - \vec t|}
558 \end{equation}
561 \begin{algorithm}
562 \caption{Kohonen maps -- training}
563 \begin{algorithmic}
564 \label{alg:koh}
565 \REQUIRE{Set of training vectors $T$, input dimension $D$}
566 \REQUIRE{max number of iterations $M$, desired error $E$}
567 \STATE $N \leftarrow \{\vec n | \vec n$ random, $\mathit{dim}(\vec n) = D\}$
\STATE $\mathit{It} \leftarrow 0$
\REPEAT
569 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
570 \STATE $\vec t \leftarrow \mathit{PickRandom}(T)$
571 \FORALL{$\vec n \in N$}
572 \STATE $D[\vec n] \leftarrow \mathit{EuclideanDistance}(\vec n, \vec t)$
573 \ENDFOR
\STATE Find $\vec w \in N$ such that $D[\vec w] \leq D[\vec m], \forall \vec m \in N$
\FORALL{$\vec n \in \mathit{TopologicalNeighbors}(N, \vec w)$}
576 \STATE $\vec n \leftarrow \vec n + \alpha(It) \cdot \mathit{Influence}(\vec w, \vec n) \cdot ( \vec t - \vec n ) $
577 \ENDFOR
578 \UNTIL{$\mathit{Error}(N, T) < E$ or $ \mathit{It} > M$}
579 \end{algorithmic}
580 \end{algorithm}
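As an example of the $\mathit{Influence}()$ function used above, a Gaussian
neighborhood kernel is one common choice (the concrete form and the width
parameter $\sigma$ here are illustrative, not a description of our
implementation):

\begin{verbatim}
import math

# w_pos, n_pos: 2D grid coordinates of the winner
# neuron and of the neuron being adapted; sigma is
# the neighborhood width, typically shrinking
# during training.
def influence(w_pos, n_pos, sigma):
    d2 = ((w_pos[0] - n_pos[0]) ** 2
          + (w_pos[1] - n_pos[1]) ** 2)
    return math.exp(-d2 / (2.0 * sigma ** 2))
\end{verbatim}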
583 \subsection{k-nearest Neighbors Classifier}
584 \label{knn}
The $k$-nearest neighbors algorithm is a basic classification technique.
We use it to approximate a player's \emph{style vector} $\vec S$, assuming that his \emph{pattern vector} $\vec P$ is known.
To achieve this, we utilize the \emph{reference style vectors} (see section \ref{style-vectors}).

The idea is based on the assumption that similarities in players' \emph{pattern vectors}
correlate with similarities in players' \emph{style vectors}. We try to approximate $\vec S$
as a weighted average of the \emph{style vectors}
$\vec s_i$ of the $k$ players with \emph{pattern vectors} $\vec p_i$ closest to $\vec P$.
This is illustrated in Algorithm \ref{alg:knn}.
Note that the weight is a function of distance and is not explicitly defined in Algorithm \ref{alg:knn};
during our research, an exponentially decreasing weight has proven sufficient.
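For instance, one possible choice of such a weight function, together with the
resulting weighted average (the particular form and the constant $\lambda > 0$
are only an illustration), is
\begin{equation}
\mathit{Weight}(d) = e^{-\lambda d}, \qquad
\vec S = \frac{\sum_{i=1}^{k} \mathit{Weight}(d_i)\, \vec s_i}{\sum_{i=1}^{k} \mathit{Weight}(d_i)}
\end{equation}
where $d_i$ is the distance of the $i$-th nearest \emph{pattern vector} from $\vec P$.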
597 \begin{algorithm}
598 \caption{k-Nearest Neighbors}
599 \begin{algorithmic}
600 \label{alg:knn}
601 \REQUIRE{pattern vector $\vec P$, $k > 0$, set of reference players $R$}
602 \FORALL{$r \in R$ }
603 \STATE $D[r] \leftarrow \mathit{EuclideanDistance}(\vec p_r, \vec P)$
604 \ENDFOR
605 \STATE $N \leftarrow \mathit{SelectSmallest}(k, R, D)$
606 \STATE $\vec S \leftarrow \vec 0$
607 \FORALL{$r \in N $}
608 \STATE $\vec S \leftarrow \vec S + \mathit{Weight}(D[r]) \cdot \vec s_r $
\ENDFOR
\STATE $\vec S \leftarrow \vec S \,/ \sum_{r \in N}{\mathit{Weight}(D[r])}$
610 \end{algorithmic}
611 \end{algorithm}
613 \subsection{Neural Network Classifier}
614 \label{neural-net}
As an alternative to the k-Nearest Neighbors algorithm (section \ref{knn}), we have used
a classifier based on feed-forward artificial neural networks \cite{TODO}.
Neural networks (NN) are known for their ability to generalize and find correlations and patterns between
input and output data. A neural network is an adaptive system and
must undergo training before it can be reasonably used. Basically, we use
the information about the \emph{reference players} (for whom both \emph{pattern vectors} and
\emph{style vectors} are known) as training data.
\subsubsection{Computation and activation of the NN}
Technically, a neural network is a network of interconnected computational units called neurons.
A feedforward neural network has a layered topology; it usually has one \emph{input layer}, one \emph{output
layer} and an arbitrary number of \emph{hidden layers} in between.

Each neuron $i$ is connected to all neurons in the previous layer and each connection has a weight $w_{ij}$.

The computation proceeds in discrete time steps.
In the first step, the \emph{activation} of the neurons in the \emph{input layer} is set according to the \emph{input vector}.
Then we iteratively compute the output of each neuron in the next layer until the output layer is reached.
The activation of the output layer is then presented as the result.

The activation $y_i$ of neuron $i$ is computed using the following equation:
\begin{equation}
y_i = f\left(\sum_{j \in J}{w_{ij} y_j}\right)
\end{equation}
where $J$ is the previous layer and $y_j$ is the activation of neuron $j$ from layer $J$. The function $f()$ is
called the \emph{activation function} and its purpose is to bound the outputs of neurons. A typical example of an activation
function is the sigmoid function.\footnote{The sigmoid function is a special case of the logistic function; it is defined by the formula
$\sigma(x)=\frac{1}{1+e^{-(rx+k)}}$; the parameters control the growth rate ($r$) and the $x$-position ($k$).}
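A minimal sketch of such a forward pass follows (Python with NumPy; the layer
representation and the choice of the logistic activation are illustrative
assumptions, not our actual implementation):

\begin{verbatim}
import numpy as np

def sigmoid(x, r=1.0, k=0.0):
    # logistic activation bounding outputs to (0, 1)
    return 1.0 / (1.0 + np.exp(-(r * x + k)))

def forward(weights, input_vector):
    # weights is a list of matrices, one per layer;
    # weights[l][i, j] connects neuron j of the
    # previous layer to neuron i of layer l.
    y = np.asarray(input_vector, dtype=float)
    for W in weights:
        y = sigmoid(W @ y)  # y_i = f(sum w_ij y_j)
    return y
\end{verbatim}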
\subsubsection{Training}
The training of a feedforward neural network usually involves some
modification of the supervised backpropagation learning algorithm \cite{TODO}.
We use the first-order optimization algorithm RPROP \cite{Riedmiller1993}.

Because the \emph{reference set} is usually not very large, we have devised a simple method for its extension.
This enhancement is based upon adding random linear combinations of \emph{style and pattern vectors} to the training set.

As noted above, the training set consists of pairs of input vectors (\emph{pattern vectors}) and
desired output vectors (\emph{style vectors}). The training set $T$ is then enlarged by adding linear combinations:
\begin{equation}
T_\mathit{base} = \{(\vec p_r, \vec s_r) \mid r \in R\}
\end{equation}
\begin{equation}
T_\mathit{ext} = \{(\vec p, \vec s) \mid \exists D \subseteq R : \vec p = \sum_{d \in D}{g_d \vec p_d}, \vec s = \sum_{d \in D}{g_d \vec s_d}\}
\end{equation}
TODO: incorporate the constraint on $g_d$ into the equation above?
where $g_d, d \in D$ are random coefficients such that $\sum_{d \in D}{g_d} = 1$. The training set
is then constructed as:
664 \begin{equation}
665 T = T_\mathit{base} \cup \mathit{SomeFiniteSubset}(T_\mathit{ext})
666 \end{equation}
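A sketch of how the extended training set could be generated follows (Python;
the sampling scheme, the subset size and the function names are assumptions
made for illustration only):

\begin{verbatim}
import random

def extend_training_set(base, n_extra, size=3):
    # base: list of (pattern_vector, style_vector)
    # pairs of the reference players; we append
    # n_extra random combinations whose
    # coefficients g_d sum to 1.
    ext = list(base)
    for _ in range(n_extra):
        subset = random.sample(base, size)
        g = [random.random() for _ in subset]
        total = sum(g)
        g = [x / total for x in g]  # sum g_d = 1
        p = [sum(gd * pv[i]
                 for gd, (pv, _) in zip(g, subset))
             for i in range(len(subset[0][0]))]
        s = [sum(gd * sv[i]
                 for gd, (_, sv) in zip(g, subset))
             for i in range(len(subset[0][1]))]
        ext.append((p, s))
    return ext
\end{verbatim}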
668 The network is trained as shown in Algorithm \ref{alg:tnn}.
670 \begin{algorithm}
671 \caption{Training Neural Network}
672 \begin{algorithmic}
673 \label{alg:tnn}
674 \REQUIRE{Train set $T$, desired error $e$, max iterations $M$}
675 \STATE $N \leftarrow \mathit{RandomlyInitializedNetwork}()$
676 \STATE $\mathit{It} \leftarrow 0$
677 \REPEAT
678 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
679 \STATE $\Delta \vec w \leftarrow \vec 0$
680 \STATE $\mathit{TotalError} \leftarrow 0$
681 %\FORALL{$(\overrightarrow{Input}, \overrightarrow{DesiredOutput}) \in T$}
682 %\STATE $\overrightarrow{Output} \leftarrow Result(N, \overrightarrow{Input})$
683 %\STATE $E \leftarrow |\overrightarrow{DesiredOutput} - \overrightarrow{Output}|$
684 \FORALL{$(\mathit{Input}, \mathit{DesiredOutput}) \in T$}
685 \STATE $\mathit{Output} \leftarrow \mathit{Result}(N, \mathit{Input})$
686 \STATE $\mathit{Error} \leftarrow |\mathit{DesiredOutput} - \mathit{Output}|$
687 \STATE $\Delta \vec w \leftarrow \Delta \vec w + \mathit{WeightUpdate}(N,\mathit{Error})$
688 \STATE $\mathit{TotalError} \leftarrow \mathit{TotalError} + \mathit{Error}$
689 \ENDFOR
690 \STATE $N \leftarrow \mathit{ModifyWeights}(N, \Delta \vec w)$
691 \UNTIL{$\mathit{TotalError} < e$ or $ \mathit{It} > M$}
692 \end{algorithmic}
693 \end{algorithm}
696 \subsubsection{Architecture details}
697 TODO num layers, num neurons, ..
700 \subsection{Implementation}
We have implemented the data mining methods in an open-source project called ``gostyle'' \cite{TODO},
licensed under the GNU GPL.
For PCA, our implementation uses the MDP library \cite{MDP}.
TODO: libfann
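A minimal sketch of the PCA step using MDP follows (the variable names and the
placeholder data are illustrative; see the MDP documentation for the
authoritative API):

\begin{verbatim}
import mdp
import numpy as np

# one pattern vector per row (placeholder data)
pattern_vectors = np.random.rand(120, 500)

pca = mdp.nodes.PCANode(output_dim=10)
pca.train(pattern_vectors)
pca.stop_training()
reduced = pca.execute(pattern_vectors)
\end{verbatim}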
708 \section{Strength Estimation Analysis}
710 PCA analysis yielded X, chi-square test blabla...
712 We then tried to apply the NN classifier with linear output function on the dataset
713 and that yielded Y (see fig. Z), with MSE abcd.
716 \section{Style Components Analysis}
718 PCA analysis yielded X, chi-square test...
720 We then tried to apply the NN classifier with linear output function on the dataset
721 and that yielded Y (see fig. Z), with MSE abcd.
724 \section{Proposed Applications}
729 % An example of a floating figure using the graphicx package.
730 % Note that \label must occur AFTER (or within) \caption.
731 % For figures, \caption should occur after the \includegraphics.
732 % Note that IEEEtran v1.7 and later has special internal code that
733 % is designed to preserve the operation of \label within \caption
734 % even when the captionsoff option is in effect. However, because
735 % of issues like this, it may be the safest practice to put all your
736 % \label just after \caption rather than within \caption{}.
738 % Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
739 % option should be used if it is desired that the figures are to be
740 % displayed while in draft mode.
742 %\begin{figure}[!t]
743 %\centering
744 %\includegraphics[width=2.5in]{myfigure}
745 % where an .eps filename suffix will be assumed under latex,
746 % and a .pdf suffix will be assumed for pdflatex; or what has been declared
747 % via \DeclareGraphicsExtensions.
748 %\caption{Simulation Results}
749 %\label{fig_sim}
750 %\end{figure}
752 % Note that IEEE typically puts floats only at the top, even when this
753 % results in a large percentage of a column being occupied by floats.
756 % An example of a double column floating figure using two subfigures.
757 % (The subfig.sty package must be loaded for this to work.)
758 % The subfigure \label commands are set within each subfloat command, the
759 % \label for the overall figure must come after \caption.
760 % \hfil must be used as a separator to get equal spacing.
761 % The subfigure.sty package works much the same way, except \subfigure is
762 % used instead of \subfloat.
764 %\begin{figure*}[!t]
765 %\centerline{\subfloat[Case I]\includegraphics[width=2.5in]{subfigcase1}%
766 %\label{fig_first_case}}
767 %\hfil
768 %\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}%
769 %\label{fig_second_case}}}
770 %\caption{Simulation results}
771 %\label{fig_sim}
772 %\end{figure*}
774 % Note that often IEEE papers with subfigures do not employ subfigure
775 % captions (using the optional argument to \subfloat), but instead will
776 % reference/describe all of them (a), (b), etc., within the main caption.
779 % An example of a floating table. Note that, for IEEE style tables, the
780 % \caption command should come BEFORE the table. Table text will default to
781 % \footnotesize as IEEE normally uses this smaller font for tables.
782 % The \label must come after \caption as always.
784 %\begin{table}[!t]
785 %% increase table row spacing, adjust to taste
786 %\renewcommand{\arraystretch}{1.3}
787 % if using array.sty, it might be a good idea to tweak the value of
788 % \extrarowheight as needed to properly center the text within the cells
789 %\caption{An Example of a Table}
790 %\label{table_example}
791 %\centering
792 %% Some packages, such as MDW tools, offer better commands for making tables
793 %% than the plain LaTeX2e tabular which is used here.
794 %\begin{tabular}{|c||c|}
795 %\hline
796 %One & Two\\
797 %\hline
798 %Three & Four\\
799 %\hline
800 %\end{tabular}
801 %\end{table}
804 % Note that IEEE does not put floats in the very first column - or typically
805 % anywhere on the first page for that matter. Also, in-text middle ("here")
806 % positioning is not used. Most IEEE journals use top floats exclusively.
807 % Note that, LaTeX2e, unlike IEEE journals, places footnotes above bottom
808 % floats. This can be corrected via the \fnbelowfloat command of the
809 % stfloats package.
813 \section{Conclusion}
814 The conclusion goes here.
815 We have shown brm and proposed brm.
Since we are not aware of any previous research on this topic and we
are limited by space and time constraints, plenty of research remains
to be done, in all parts
of our analysis --- different methods of generating the $\vec p$ vectors
can be explored and other data mining methods could be tried.
It can be argued that many players adjust their style to the game conditions
(Go development era, handicap, komi and color, time limits, opponent)
or that styles might manifest differently in various game stages.
Finally, more professional players could be consulted on the findings
and for calibration of the style scales.
828 TODO: Future research --- Sparse PCA
833 % if have a single appendix:
834 %\appendix[Proof of the Zonklar Equations]
835 % or
836 %\appendix % for no appendix heading
837 % do not use \section anymore after \appendix, only \section*
838 % is possibly needed
840 % use appendices with more than one appendix
841 % then use \section to start each appendix
842 % you must declare a \section before using any
843 % \subsection or using \label (\appendices by itself
844 % starts a section numbered zero.)
848 %\appendices
849 %\section{Proof of the First Zonklar Equation}
850 %Appendix one text goes here.
852 %% you can choose not to have a title for an appendix
853 %% if you want by leaving the argument blank
854 %\section{}
855 %Appendix two text goes here.
858 % use section* for acknowledgement
859 \section*{Acknowledgment}
860 \label{acknowledgement}
863 We would like to thank X for reviewing our paper.
We appreciate helpful comments on our general methodology
by John Fairbairn, T. M. Hall, Robert Jasiek
and several GoDiscussions.com users \cite{GoDiscThread}.
Finally, we are very grateful to Alexander Dinerstein 3-pro, Motoki Noguchi 7-dan
and Vit Brunner 4-dan for their rankings of the Go styles of selected professionals.
871 % Can use something like this to put references on a page
872 % by themselves when using endfloat and the captionsoff option.
873 \ifCLASSOPTIONcaptionsoff
\newpage
\fi
879 % trigger a \newpage just before the given reference
880 % number - used to balance the columns on the last page
881 % adjust value as needed - may need to be readjusted if
882 % the document is modified later
883 %\IEEEtriggeratref{8}
884 % The "triggered" command can be changed if desired:
885 %\IEEEtriggercmd{\enlargethispage{-5in}}
887 % references section
889 % can use a bibliography generated by BibTeX as a .bbl file
890 % BibTeX documentation can be easily obtained at:
891 % http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
892 % The IEEEtran BibTeX style support page is at:
893 % http://www.michaelshell.org/tex/ieeetran/bibtex/
894 \bibliographystyle{IEEEtran}
895 % argument is your BibTeX string definitions and bibliography database(s)
896 \bibliography{gostyle}
898 % <OR> manually copy in the resultant .bbl file
899 % set second argument of \begin to the number of references
900 % (used to reserve space for the reference number labels box)
901 %\begin{thebibliography}{1}
903 %\bibitem{MasterMCTS}
905 %\end{thebibliography}
907 % biography section
909 % If you have an EPS/PDF photo (graphicx package needed) extra braces are
910 % needed around the contents of the optional argument to biography to prevent
911 % the LaTeX parser from getting confused when it sees the complicated
912 % \includegraphics command within an optional argument. (You could create
913 % your own custom macro containing the \includegraphics command to make things
914 % simpler here.)
915 %\begin{biography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{mshell}}]{Michael Shell}
916 % or if you just want to reserve a space for a photo:
918 \begin{IEEEbiography}{Michael Shell}
919 Biography text here.
920 \end{IEEEbiography}
922 % if you will not have a photo at all:
923 \begin{IEEEbiographynophoto}{John Doe}
924 Biography text here.
925 \end{IEEEbiographynophoto}
927 % insert where needed to balance the two columns on the last page with
928 % biographies
929 %\newpage
931 \begin{IEEEbiographynophoto}{Jane Doe}
932 Biography text here.
933 \end{IEEEbiographynophoto}
935 % You can push biographies down or up by placing
936 % a \vfill before or after them. The appropriate
937 % use of \vfill depends on what kind of text is
938 % on the last page and whether or not the columns
939 % are being equalized.
941 %\vfill
943 % Can be used to pull up biographies so that the bottom of the last one
944 % is flush with the other column.
945 %\enlargethispage{-5in}
949 % that's all folks
950 \end{document}