tex: Strength Estimator cleanups
[gostyle.git] / tex / gostyle.tex
blob2632628149b8f26bf933517900eb4e3c790c56f1
1 \documentclass[journal]{IEEEtran}
3 \usepackage{cite}
4 % cite.sty was written by Donald Arseneau
5 % V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
6 % \cite{} output to follow that of IEEE. Loading the cite package will
7 % result in citation numbers being automatically sorted and properly
8 % "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
9 % cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
10 % \cite will automatically add leading space, if needed. Use cite.sty's
11 % noadjust option (cite.sty V3.8 and later) if you want to turn this off.
12 % cite.sty is already installed on most LaTeX systems. Be sure and use
13 % version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
14 % not currently provide for hyperlinked citations.
15 % The latest version can be obtained at:
16 % http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
17 % The documentation is contained in the cite.sty file itself.
20 % *** GRAPHICS RELATED PACKAGES ***
22 \ifCLASSINFOpdf
23 % \usepackage[pdftex]{graphicx}
24 % declare the path(s) where your graphic files are
25 % \graphicspath{{../pdf/}{../jpeg/}}
26 % and their extensions so you won't have to specify these with
27 % every instance of \includegraphics
28 % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
29 \else
30 % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
31 % will default to the driver specified in the system graphics.cfg if no
32 % driver is specified.
33 % \usepackage[dvips]{graphicx}
34 \usepackage{graphicx}
35 % declare the path(s) where your graphic files are
36 % \graphicspath{{../eps/}}
37 % and their extensions so you won't have to specify these with
38 % every instance of \includegraphics
39 % \DeclareGraphicsExtensions{.eps}
40 \fi
42 \usepackage{threeparttable}
44 \usepackage{psgo}
45 \setgounit{0.4cm}
47 \usepackage{algorithm}
48 \usepackage{algorithmic}
49 %\usepackage{algpseudocode}
50 % WICKED: neither one works???
51 % algorithmic.sty can be obtained at:
52 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
53 % There is also a support site at:
54 % http://algorithms.berlios.de/index.html
55 % Also of interest may be the (relatively newer and more customizable)
56 % algorithmicx.sty package by Szasz Janos:
57 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/
59 % *** ALIGNMENT PACKAGES ***
61 %\usepackage{array}
62 % http://www.ctan.org/tex-archive/macros/latex/required/tools/
65 \usepackage{amsmath}
66 %\usepackage{mdwtab}
67 % http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/
70 % IEEEtran contains the IEEEeqnarray family of commands that can be used to
71 % generate multiline equations as well as matrices, tables, etc., of high
72 % quality.
74 %\usepackage{eqparbox}
75 % Also of notable interest is Scott Pakin's eqparbox package for creating
76 % (automatically sized) equal width boxes - aka "natural width parboxes".
77 % Available at:
78 % http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/
82 % *** SUBFIGURE PACKAGES ***
83 %\usepackage[tight,footnotesize]{subfigure}
84 % subfigure.sty was written by Steven Douglas Cochran. This package makes it
85 % easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
86 % work, it is a good idea to load it with the tight package option to reduce
87 % the amount of white space around the subfigures. subfigure.sty is already
88 % installed on most LaTeX systems. The latest version and documentation can
89 % be obtained at:
90 % http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
91 % subfigure.sty has been superceeded by subfig.sty.
95 %\usepackage[caption=false]{caption}
96 %\usepackage[font=footnotesize]{subfig}
97 % subfig.sty, also written by Steven Douglas Cochran, is the modern
98 % replacement for subfigure.sty. However, subfig.sty requires and
99 % automatically loads Axel Sommerfeldt's caption.sty which will override
100 % IEEEtran.cls handling of captions and this will result in nonIEEE style
101 % figure/table captions. To prevent this problem, be sure and preload
102 % caption.sty with its "caption=false" package option. This is will preserve
103 % IEEEtran.cls handing of captions. Version 1.3 (2005/06/28) and later
104 % (recommended due to many improvements over 1.2) of subfig.sty supports
105 % the caption=false option directly:
106 %\usepackage[caption=false,font=footnotesize]{subfig}
108 % The latest version and documentation can be obtained at:
109 % http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
110 % The latest version and documentation of caption.sty can be obtained at:
111 % http://www.ctan.org/tex-archive/macros/latex/contrib/caption/
115 % *** FLOAT PACKAGES ***
117 %\usepackage{fixltx2e}
118 % fixltx2e, the successor to the earlier fix2col.sty, was written by
119 % Frank Mittelbach and David Carlisle. This package corrects a few problems
120 % in the LaTeX2e kernel, the most notable of which is that in current
121 % LaTeX2e releases, the ordering of single and double column floats is not
122 % guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
123 % single column figure to be placed prior to an earlier double column
124 % figure. The latest version and documentation can be found at:
125 % http://www.ctan.org/tex-archive/macros/latex/base/
129 %\usepackage{stfloats}
130 % stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
131 % the ability to do double column floats at the bottom of the page as well
132 % as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
133 % LaTeX2e). It also provides a command:
134 %\fnbelowfloat
135 % to enable the placement of footnotes below bottom floats (the standard
136 % LaTeX2e kernel puts them above bottom floats). This is an invasive package
137 % which rewrites many portions of the LaTeX2e float routines. It may not work
138 % with other packages that modify the LaTeX2e float routines. The latest
139 % version and documentation can be obtained at:
140 % http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
141 % Documentation is contained in the stfloats.sty comments as well as in the
142 % presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
143 % does not allow \baselineskip to stretch. Authors submitting work to the
144 % IEEE should note that IEEE rarely uses double column equations and
145 % that authors should try to avoid such use. Do not be tempted to use the
146 % cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
147 % not format its papers in such ways.
150 %\ifCLASSOPTIONcaptionsoff
151 % \usepackage[nomarkers]{endfloat}
152 % \let\MYoriglatexcaption\caption
153 % \renewcommand{\caption}[2][\relax]{\MYoriglatexcaption[#2]{#2}}
154 %\fi
155 % endfloat.sty was written by James Darrell McCauley and Jeff Goldberg.
156 % This package may be useful when used in conjunction with IEEEtran.cls'
157 % captionsoff option. Some IEEE journals/societies require that submissions
158 % have lists of figures/tables at the end of the paper and that
159 % figures/tables without any captions are placed on a page by themselves at
160 % the end of the document. If needed, the draftcls IEEEtran class option or
161 % \CLASSINPUTbaselinestretch interface can be used to increase the line
162 % spacing as well. Be sure and use the nomarkers option of endfloat to
163 % prevent endfloat from "marking" where the figures would have been placed
164 % in the text. The two hack lines of code above are a slight modification of
165 % that suggested by in the endfloat docs (section 8.3.1) to ensure that
166 % the full captions always appear in the list of figures/tables - even if
167 % the user used the short optional argument of \caption[]{}.
168 % IEEE papers do not typically make use of \caption[]'s optional argument,
169 % so this should not be an issue. A similar trick can be used to disable
170 % captions of packages such as subfig.sty that lack options to turn off
171 % the subcaptions:
172 % For subfig.sty:
173 % \let\MYorigsubfloat\subfloat
174 % \renewcommand{\subfloat}[2][\relax]{\MYorigsubfloat[]{#2}}
175 % For subfigure.sty:
176 % \let\MYorigsubfigure\subfigure
177 % \renewcommand{\subfigure}[2][\relax]{\MYorigsubfigure[]{#2}}
178 % However, the above trick will not work if both optional arguments of
179 % the \subfloat/subfig command are used. Furthermore, there needs to be a
180 % description of each subfigure *somewhere* and endfloat does not add
181 % subfigure captions to its list of figures. Thus, the best approach is to
182 % avoid the use of subfigure captions (many IEEE journals avoid them anyway)
183 % and instead reference/explain all the subfigures within the main caption.
184 % The latest version of endfloat.sty and its documentation can obtained at:
185 % http://www.ctan.org/tex-archive/macros/latex/contrib/endfloat/
187 % The IEEEtran \ifCLASSOPTIONcaptionsoff conditional can also be used
188 % later in the document, say, to conditionally put the References on a
189 % page by themselves.
191 % *** PDF, URL AND HYPERLINK PACKAGES ***
193 \usepackage{url}
194 % url.sty was written by Donald Arseneau. It provides better support for
195 % handling and breaking URLs. url.sty is already installed on most LaTeX
196 % systems. The latest version can be obtained at:
197 % http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
198 % Read the url.sty source comments for usage information. Basically,
199 % \url{my_url_here}.
202 % *** Do not adjust lengths that control margins, column widths, etc. ***
203 % *** Do not use packages that alter fonts (such as pslatex). ***
204 % There should be no need to do such things with IEEEtran.cls V1.6 and later.
205 % (Unless specifically asked to do so by the journal or conference you plan
206 % to submit to, of course. )
208 % correct bad hyphenation here
209 \hyphenation{op-tical net-works semi-conduc-tor know-ledge}
212 \begin{document}
214 % paper title
215 % can use linebreaks \\ within to get better formatting as desired
216 \title{On Move Pattern Trends\\in Large Go Games Corpus}
218 % use \thanks{} to gain access to the first footnote area
219 % a separate \thanks must be used for each paragraph as LaTeX2e's \thanks
220 % was not built to handle multiple paragraphs
221 \author{Petr~Baudi\v{s},~Josef~Moud\v{r}\'{i}k% <-this % stops a space
222 \thanks{P. Baudi\v{s} is student at the Faculty of Math and Physics, Charles University, Prague, CZ, and also does some of his Computer Go research as an employee of SUSE Labs Prague, Novell CZ.}% <-this % stops a space
223 \thanks{J. Moud\v{r}\'{i}k is student at the Faculty of Math and Physics, Charles University, Prague, CZ.}}
225 % note the % following the last \IEEEmembership and also \thanks -
226 % these prevent an unwanted space from occurring between the last author name
227 % and the end of the author line. i.e., if you had this:
229 % \author{....lastname \thanks{...} \thanks{...} }
230 % ^------------^------------^----Do not want these spaces!
232 % a space would be appended to the last name and could cause every name on that
233 % line to be shifted left slightly. This is one of those "LaTeX things". For
234 % instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
235 % "AB" then you have to do: "\textbf{A}\textbf{B}"
236 % \thanks is no different in this regard, so shield the last } of each \thanks
237 % that ends a line with a % and do not let a space in before the next \thanks.
238 % Spaces after \IEEEmembership other than the last one are OK (and needed) as
239 % you are supposed to have spaces between the names. For what it is worth,
240 % this is a minor point as most people would not even notice if the said evil
241 % space somehow managed to creep in.
244 % The paper headers
245 \markboth{Transactions on Computational Intelligence and AI in Games}%
246 {On Move Pattern Trends in Large Go Games Corpus}
247 % The only time the second header will appear is for the odd numbered pages
248 % after the title page when using the twoside option.
250 % *** Note that you probably will NOT want to include the author's ***
251 % *** name in the headers of peer review papers. ***
252 % You can use \ifCLASSOPTIONpeerreview for conditional compilation here if
253 % you desire.
258 % If you want to put a publisher's ID mark on the page you can do it like
259 % this:
260 %\IEEEpubid{0000--0000/00\$00.00~\copyright~2007 IEEE}
261 % Remember, if you use this you must call \IEEEpubidadjcol in the second
262 % column for its text to clear the IEEEpubid mark.
266 % use for special paper notices
267 %\IEEEspecialpapernotice{(Invited Paper)}
272 % make the title area
273 \maketitle
276 \begin{abstract}
277 %\boldmath
279 We process a~large corpus of game records of the board game of Go and
280 propose a~way to extract summary information on played moves.
281 We then apply several basic data-mining methods on the summary
282 information to identify the most differentiating features within the
283 summary information, and discuss their correspondence with traditional
284 Go knowledge. We show mappings of the features to player attributes
285 like playing strength or informally perceived ``playing style'' (such as
286 territoriality or aggressivity), and propose applications including
287 seeding real-world ranks of internet players, aiding in Go study, or
288 contribution to Go-theoretical discussion on the scope of ``playing
289 style''.
291 \end{abstract}
292 % IEEEtran.cls defaults to using nonbold math in the Abstract.
293 % This preserves the distinction between vectors and scalars. However,
294 % if the journal you are submitting to favors bold math in the abstract,
295 % then you can use LaTeX's standard command \boldmath at the very start
296 % of the abstract to achieve this. Many IEEE journals frown on math
297 % in the abstract anyway.
299 % Note that keywords are not normally used for peerreview papers.
300 \begin{IEEEkeywords}
301 board games, go, data mining, pattern recognition, player strength, playing style,
302 neural networks, Kohonen maps, principal component analysis
303 \end{IEEEkeywords}
310 % For peer review papers, you can put extra information on the cover
311 % page as needed:
312 % \ifCLASSOPTIONpeerreview
313 % \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
314 % \fi
316 % For peerreview papers, this IEEEtran command inserts a page break and
317 % creates the second title. It will be ignored for other modes.
318 \IEEEpeerreviewmaketitle
322 \section{Introduction}
323 % The very first letter is a 2 line initial drop letter followed
324 % by the rest of the first word in caps.
326 % form to use if the first word consists of a single letter:
327 % \IEEEPARstart{A}{demo} file is ....
329 % form to use if you need the single drop letter followed by
330 % normal text (unknown if ever used by IEEE):
331 % \IEEEPARstart{A}{}demo file is ....
333 % Some journals put the first two words in caps:
334 % \IEEEPARstart{T}{his demo} file is ....
336 % Here we have the typical use of a "T" for an initial drop letter
337 % and "HIS" in caps to complete the first word.
338 \IEEEPARstart{T}{he} field of Computer Go usually focuses on the problem
339 of creating a~program to play the game, finding the best move from a~given
340 board position~\cite{GellySilver2008}.
341 We will make use of one method developed in the course
342 of such research and apply it to the analysis of existing game records
343 with the aim of helping humans to play and understand the game better
344 instead.
346 Go is a~two-player full-information board game played
347 on a~square grid (usually $19\times19$ lines) with black and white
348 stones; the goal of the game is to surround the most territory and
349 capture enemy stones. We assume basic familiarity with the game.
351 Many Go players are eager to play using computers (usually over
352 the internet) and review games played by others on computers as well.
353 This means that large amounts of game records are collected and digitally
354 stored, enabling easy processing of such collections. However, so far
355 only little has been done with the available data --- we are aware
356 only of uses for simple win/loss statistics \cite{KGSStats,KGSAnalytics,ProGoR}
357 and ``next move'' statistics on a~specific position \cite{Kombilo,MoyoGo}.
359 We present a~more in-depth approach --- from all played moves, we devise
360 a~compact evaluation of each player. We then explore correlations between
361 evaluations of various players in light of externally given information.
362 This way, we can discover similarity between move characteristics of
363 players with the same playing strength, or discuss the meaning of the
364 ``playing style'' concept on the assumption that similar playing styles
365 should yield similar move characteristics.
368 \section{Data Extraction}
369 \label{pattern-vectors}
371 As the input of our analysis, we use large collections of game records%
372 \footnote{We use the SGF format \cite{SGF} in our implementation.}
373 grouped by the primary object of analysis (player name, player rank, etc.).
374 We process the games by object, generating a description for each
375 played move -- a {\em pattern}, being a combination of several
376 {\em pattern features} described below.
378 We keep track of the most
379 occurring patterns, finally composing $n$-dimensional {\em pattern vector}
380 $\vec p$ of per-pattern counts from the $n$ globally most frequent patterns%
381 \footnote{We use $n=500$ in our analysis.}
382 (the mapping from patterns to vector elements is common for all objects).
383 We can then process and compare just the pattern vectors.
385 The pattern vector elements can have diverse values since for each object,
386 we consider a different number of games (and thus patterns).
387 Therefore, we linearly rescale and normalize the values to range $[-1,1]$,
388 the most frequent pattern having the value of $1$ and the least occurring
389 one being $-1$.%
390 \footnote{We did not investigate different methods of re-scaling the vectors;
391 that might be a good way of improving accuracy of our analysis.}
392 Thus, we obtain vectors describing relative frequency of played patterns
393 independently of the number of gathered patterns.
395 \subsection{Pattern Features}
396 When deciding how to compose the patterns we use to describe moves,
397 we need to consider a specificity tradeoff --- overly general descriptions carry too little
398 information to discern various player attributes; too specific descriptions
399 gather too few specimens over the games sample and the vector differences are
400 not statistically significant.
402 We have chosen an intuitive and simple approach inspired by pattern features
403 used when computing Elo ratings for candidate patterns in Computer Go
404 play~\cite{Elo}. Each pattern is a~combination of several {\em pattern features}
405 (name--value pairs) matched at the position of the played move.
406 We use these features:
408 \begin{itemize}
409 \item capture move flag
410 \item atari move flag
411 \item atari escape flag
412 \item contiguity-to-last flag --- whether the move has been played in one of 8 neighbors of the last move
413 \item contiguity-to-second-last flag
414 \item board edge distance --- only up to distance 4
415 \item spatial pattern --- configuration of stones around the played move
416 \end{itemize}
418 The spatial patterns are normalized (using a dictionary) to be always
419 black-to-play and maintain translational and rotational symmetry.
420 Configurations of radius between 2 and 9 in the gridcular metric%
421 \footnote{The {\em gridcular} metric
422 $d(x,y) = |\delta x| + |\delta y| + \max(|\delta x|, |\delta y|)$ defines
423 a circle-like structure on the Go board square grid. \cite{SpatPat} }
424 are matched.
426 Pattern vectors representing these features contain information on
427 played shape as well as basic representation of tactical dynamics
428 --- threats to capture stones, replying to last move, or ignoring
429 opponent's move elsewhere to return to an urgent local situation.
430 The shapes most frequently correspond to opening moves
431 (either in empty corners and sides, or as part of {\em joseki}
432 --- commonly played sequences) characteristic for a certain
433 strategic aim. In the opening, even a single-line difference
434 in the distance from the border can have dramatic impact on
435 further local and global development.
437 \subsection{Implementation}
439 We have implemented the data extraction by making use of the pattern
440 features matching implementation%
441 \footnote{The pattern features matching was developed according
442 to the Elo-rating playing scheme. \cite{Elo}}
443 within the Pachi go-playing program \cite{Pachi}.
444 We extract information on players by converting the SGF game
445 records to GTP stream \cite{GTP} that feeds Pachi's {\tt patternscan}
446 engine, outputting a~single {\em patternspec} (string representation
447 of the particular pattern features combination) per move. Of course,
448 only moves played by the appropriate color in the game are collected.
450 \section{Data Mining}
451 \label{data-mining}
453 To assess the properties of gathered pattern vectors
454 and their influence on playing styles,
455 we process the data by several basic data mining techniques.
457 The first two methods {\em (analytic)} rely purely on data gathered
458 from the game collection
459 and serve to show internal structure and correlations within the data set.
461 Principal Component Analysis finds orthogonal vector components that
462 have the largest variance.
463 Reversing the process can indicate which patterns correlate with each component.
464 Additionally, PCA can be used as vector preprocessing for methods
465 that are negatively sensitive to pattern vector component correlations.
467 The~second method of Kohonen Maps
468 is based on the theory of self-organizing maps of abstract units (neurons) that
469 compete against each other for the representation of the input space.
470 Because neurons in the network are organized in a two-dimensional plane,
471 the trained network spreads the vectors on a 2D plane,
472 allowing for visualization of clusters of players with similar properties.
475 Furthermore, we use two \emph{classification} methods that assign
476 each pattern vector $\vec P$ an \emph{output vector} $\vec O$,
477 representing e.g.~information about styles, player's strength or even
478 meta-information like the player's era or a country of origin.
479 Initially, the methods must be calibrated (trained) on some prior knowledge,
480 usually in the form of \emph{reference pairs} of pattern vectors
481 and the associated output vectors.
483 Moreover, the reference set can be divided into training and testing pairs
484 and the methods can be compared by the mean square error on testing data set
485 (difference of output vectors approximated by the method and their real desired value).
487 %\footnote{However, note that dicrete characteristics such as country of origin are
488 %not very feasible to use here, since WHAT??? is that even true?? }
490 The $k$-Nearest Neighbors \cite{CoverHart1967} classifier
491 approximates $\vec O$ by composing the output vectors
492 of $k$ reference pattern vectors closest to $\vec P$.
494 The other classifier is a~multi-layer feed-forward Artificial Neural Network:
495 the neural network can learn correlations between input and output vectors
496 and generalize the ``knowledge'' to unknown vectors; it can be more flexible
497 in the interpretation of different pattern vector elements and discern more
498 complex relations than the kNN classifier,
499 but may not be as stable and requires larger training sample.
501 \subsection{Principal Component Analysis}
502 \label{pca}
503 We use Principal Component Analysis (\emph{PCA})~\cite{Jolliffe1986}
504 to reduce the dimensions of the pattern vectors while preserving
505 as much information as possible, assuming inter-dependencies between
506 pattern vector dimensions are linear.
508 Briefly, PCA is an eigenvalue decomposition of a~covariance matrix of centered pattern vectors,
509 producing a~linear mapping $o$ from $n$-dimensional vector space
510 to a~reduced $m$-dimensional vector space.
511 The $m$ eigenvectors of the original vectors' covariance matrix
512 with the largest eigenvalues are used as the base of the reduced vector space;
513 the eigenvectors form projection matrix $W$.
515 For each original pattern vector $\vec p_i$,
516 we obtain its new representation $\vec r_i$ in the PCA base
517 as shown in the following equation:
518 \begin{equation}
519 \vec r_i = W \cdot \vec p_i
520 \end{equation}
522 The whole process is described in Algorithm~\ref{alg:pca}.
524 \begin{algorithm}
525 \caption{PCA -- Principal Component Analysis}
526 \begin{algorithmic}
527 \label{alg:pca}
528 \REQUIRE{$m > 0$, set of players $R$ with pattern vectors $p_r$}
529 \STATE $\vec \mu \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_r}$
530 \FOR{ $r \in R$}
531 \STATE $\vec p_r \leftarrow \vec p_r - \vec \mu$
532 \ENDFOR
533 \FOR{ $(i,j) \in \{1,... ,n\} \times \{1,... ,n\}$}
534 \STATE $\mathit{Cov}[i,j] \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_{ri} \cdot \vec p_{rj}}$
535 \ENDFOR
536 \STATE Compute Eigenvalue Decomposition of $\mathit{Cov}$ matrix
537 \STATE Get $m$ largest eigenvalues
538 \STATE Most significant eigenvectors ordered by decreasing eigenvalues form the rows of matrix $W$
539 \FOR{ $r \in R$}
540 \STATE $\vec r_r\leftarrow W \vec p_r$
541 \ENDFOR
542 \end{algorithmic}
543 \end{algorithm}
545 \label{pearson}
546 We want to find correlations between PCA dimensions and
547 some prior knowledge (player rank, style vector).
548 For this purpose, we compute the well-known
549 {\em Pearson product-moment correlation coefficient} \cite{Pearson},
550 measuring the strength of the linear dependence%
551 \footnote{A desirable property of PMCC is that it is invariant to translations and rescaling
552 of the vectors.}
553 between the dimensions:
555 \begin{equation}
r_{X,Y} = \frac{\mathrm{cov}(X,Y)}{\sigma_X \sigma_Y}
\end{equation}
557 \subsection{Kohonen Maps}
558 \label{koh}
559 Kohonen map is a self-organizing network with neurons spread evenly over a~two-dimensional plane.
560 Neurons $\vec n$ in the map compete for representation of portions of the input vector space,
561 each vector being represented by some neuron.
562 The network is trained so that the neurons
563 that are topologically close tend to represent vectors that are close in suitable metric as well.
565 First, a~randomly initialized network is sequentially trained;
566 in each iteration, we choose a~random training vector $\vec t$
567 and find the {\em winner neuron} $\vec w$ that is closest to $\vec t$ in Euclidean metric.
569 We then adapt neurons $n$ from the neighborhood of $\vec w$ employing the equation
570 \begin{equation}
571 \vec n = \vec n + \alpha \cdot \mathit{Influence}(\vec w, \vec n) \cdot (\vec t - \vec n)
572 \end{equation}
573 where $\alpha$ is a learning parameter, usually decreasing in time.
574 $\mathit{Influence}()$ is a function that forces neurons to spread.
575 Such a function is usually realized using a Mexican hat function or a difference of Gaussians
576 \cite{TODO}.
577 The state of the network can be evaluated by calculating mean square difference
578 between each $\vec t \in T$ and its corresponding winner neuron $\vec w_t$:
579 \begin{equation}
580 \mathit{Error}(N,T) = \sum_{\vec t \in T}{|\vec w_t - \vec t|}
581 \end{equation}
584 \begin{algorithm}
585 \caption{Kohonen maps -- training}
586 \begin{algorithmic}
587 \label{alg:koh}
588 \REQUIRE{Set of training vectors $T$, input dimension $D$}
589 \REQUIRE{max number of iterations $M$, desired error $E$}
590 \STATE $N \leftarrow \{\vec n | \vec n$ random, $\mathit{dim}(\vec n) = D\}$
\STATE $\mathit{It} \leftarrow 0$
591 \REPEAT
592 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
593 \STATE $\vec t \leftarrow \mathit{PickRandom}(T)$
594 \FORALL{$\vec n \in N$}
595 \STATE $D[\vec n] \leftarrow \mathit{EuclideanDistance}(\vec n, \vec t)$
596 \ENDFOR
597 \STATE Find $ \vec w \in N$ so that $D[\vec w] \leq D[\vec m], \forall \vec m \in N$
598 \FORALL{$\vec n \in \mathit{TopologicalNeighbors}(N, \vec w)$}
599 \STATE $\vec n \leftarrow \vec n + \alpha(It) \cdot \mathit{Influence}(\vec w, \vec n) \cdot ( \vec t - \vec n ) $
600 \ENDFOR
601 \UNTIL{$\mathit{Error}(N, T) < E$ or $ \mathit{It} > M$}
602 \end{algorithmic}
603 \end{algorithm}
606 \subsection{k-nearest Neighbors Classifier}
607 \label{knn}
608 Our goal is to approximate a~player's output vector $\vec O$;
609 we know their pattern vector $\vec P$.
610 We further assume that similarities in players' pattern vectors
611 uniformly correlate with similarities in players' output vectors.
613 We require a set of reference players $R$ with known \emph{pattern vectors} $\vec p_r$
614 and \emph{output vectors} $\vec o_r$.
616 $\vec O$ is approximated as a~weighted average of \emph{output vectors}
617 $\vec o_i$ of $k$ players with \emph{pattern vectors} $\vec p_i$ closest to $\vec P$.
618 This is illustrated in the Algorithm \ref{alg:knn}.
619 Note that the weight is a function of distance and is not explicitly defined in Algorithm \ref{alg:knn}.
620 During our research, exponentially decreasing weight has proven to be sufficient.
622 \begin{algorithm}
623 \caption{k-Nearest Neighbors}
624 \begin{algorithmic}
625 \label{alg:knn}
626 \REQUIRE{pattern vector $\vec P$, $k > 0$, set of reference players $R$}
627 \FORALL{$r \in R$ }
628 \STATE $D[r] \leftarrow \mathit{EuclideanDistance}(\vec p_r, \vec P)$
629 \ENDFOR
630 \STATE $N \leftarrow \mathit{SelectSmallest}(k, R, D)$
631 \STATE $\vec O \leftarrow \vec 0$
632 \FORALL{$r \in N $}
633 \STATE $\vec O \leftarrow \vec O + \mathit{Weight}(D[r]) \cdot \vec o_r $
634 \ENDFOR
635 \end{algorithmic}
636 \end{algorithm}
638 \subsection{Neural Network Classifier}
639 \label{neural-net}
641 Feed-forward neural networks \cite{ANN} are known for their ability to generalize
642 and find correlations between input patterns and output classifications.
643 Before use, the network is iteratively trained on the training data
644 until the error on the training set is reasonably small.
646 %Neural network is an adaptive system that must undergo a training
647 %period similarly to the requirement
648 %of reference vectors for the k-Nearest Neighbors algorithm above.
650 \subsubsection{Computation and activation of the NN}
651 Technically, the neural network is a network of interconnected
652 computational units called neurons.
653 A feedforward neural network has a layered topology;
654 it usually has one \emph{input layer}, one \emph{output layer}
655 and an arbitrary number of \emph{hidden layers} between.
657 Each neuron $i$ is connected to all neurons in the previous layer and each connection has its weight $w_{ij}$.
659 The computation proceeds in discrete time steps.
660 In the first step, the neurons in the \emph{input layer}
661 are \emph{activated} according to the \emph{input vector}.
662 Then, we iteratively compute output of each neuron in the next layer
663 until the output layer is reached.
664 The activity of output layer is then presented as the result.
666 The activation $y_i$ of neuron $i$ from the layer $I$ is computed as
667 \begin{equation}
668 y_i = f\left(\sum_{j \in J}{w_{ij} y_j}\right)
669 \end{equation}
670 where $J$ is the previous layer, while $y_j$ is the activation for neurons from layer $J$.
671 Function $f()$ is a~so-called \emph{activation function}
672 and its purpose is to bound the outputs of neurons.
673 A typical example of an activation function is the sigmoid function.%
674 \footnote{A special case of the logistic function, defined by the formula
675 $\sigma(x)=\frac{1}{1+e^{-(rx+k)}}$; parameters control the growth rate ($r$)
676 and the x-position ($k$).}
678 \subsubsection{Training}
679 Training of the feed-forward neural network usually involves some
680 modification of the supervised backpropagation learning algorithm.
681 We use a first-order optimization algorithm called RPROP~\cite{Riedmiller1993}.
683 %Because the \emph{reference set} is usually not very large,
684 %we have devised a simple method for its extension.
685 %This enhancement is based upon adding random linear combinations
686 %of \emph{style and pattern vectors} to the training set.
688 As outlined above, the training set $T$ consists of
689 $(\vec p_i, \vec o_i)$ pairs.
690 The training algorithm is shown in Algorithm \ref{alg:tnn}.
692 \begin{algorithm}
693 \caption{Training Neural Network}
694 \begin{algorithmic}
695 \label{alg:tnn}
696 \REQUIRE{Train set $T$, desired error $e$, max iterations $M$}
697 \STATE $N \leftarrow \mathit{RandomlyInitializedNetwork}()$
698 \STATE $\mathit{It} \leftarrow 0$
699 \REPEAT
700 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
701 \STATE $\Delta \vec w \leftarrow \vec 0$
702 \STATE $\mathit{TotalError} \leftarrow 0$
703 %\FORALL{$(\overrightarrow{Input}, \overrightarrow{DesiredOutput}) \in T$}
704 %\STATE $\overrightarrow{Output} \leftarrow Result(N, \overrightarrow{Input})$
705 %\STATE $E \leftarrow |\overrightarrow{DesiredOutput} - \overrightarrow{Output}|$
706 \FORALL{$(\mathit{Input}, \mathit{DesiredOutput}) \in T$}
707 \STATE $\mathit{Output} \leftarrow \mathit{Result}(N, \mathit{Input})$
708 \STATE $\mathit{Error} \leftarrow |\mathit{DesiredOutput} - \mathit{Output}|$
709 \STATE $\Delta \vec w \leftarrow \Delta \vec w + \mathit{WeightUpdate}(N,\mathit{Error})$
710 \STATE $\mathit{TotalError} \leftarrow \mathit{TotalError} + \mathit{Error}$
711 \ENDFOR
712 \STATE $N \leftarrow \mathit{ModifyWeights}(N, \Delta \vec w)$
713 \UNTIL{$\mathit{TotalError} < e$ or $ \mathit{It} > M$}
714 \end{algorithmic}
715 \end{algorithm}
717 \subsection{Implementation}
719 We have implemented the data mining methods as the
720 ``gostyle'' open-source framework \cite{GoStyle},
721 made available under the GNU GPL licence.
723 The majority of our basic processing and the analysis parts
724 are implemented in the Python \cite{Python2005} programming language.
725 We use several external libraries, most notably the MDP library \cite{MDP} (used for PCA analysis)
726 and Kohonen library \cite{KohonenPy}.
727 The neural network part of the project is written using the libfann C library~\cite{Nissen2003}.
730 \section{Strength Estimator}
732 \begin{figure*}[!t]
733 \centering
734 \includegraphics[width=7in]{strength-pca}
735 \caption{PCA of by-strength vectors}
736 \label{fig:strength_pca}
737 \end{figure*}
739 First, we have used our framework to analyse correlations of pattern vectors
740 and playing strength. Like in other competitively played board games, Go players
741 receive real-world {\em rating number} based on tournament games,
742 and {\em rank} based on their rating.%
743 \footnote{Elo-like rating system \cite{GoR} is usually used,
744 corresponding to even win chances for a game of two players with the same rank,
745 and about a 2:3 win chance for the stronger player in case of a one-rank difference.}%
746 \footnote{Professional ranks and dan ranks in some Asian countries may
747 be assigned differently.}
748 The amateur ranks range from 30-kyu (beginner) to 1-kyu (intermediate),
749 followed by 1-dan to 7-dan\footnote{9-dan in some systems.} (top-level player).
750 Multiple independent real-world ranking scales exist
751 (geographically based), also online servers maintain their own user ranking;
752 the difference between scales can be up to several ranks and the rank
753 distributions also differ~\cite{RankComparison}.
755 As the source game collection, we use Go Teaching Ladder reviews archive%
756 \footnote{The reviews contain comments and variations --- we consider only the main
757 variation with the actual played game.}
758 \cite{GTL} --- this collection contains 7700 games of players with strength ranging
759 from 30-kyu to 4-dan; we consider only even games with clear rank information,
760 and then randomly separate 770 games as a testing set.
761 Since the rank information is provided by the users and may not be consistent,
762 we are forced to take a simplified look at the ranks,
763 discarding the differences between various systems and thus somewhat
764 increasing error in our model.\footnote{Since our results seem satisfying,
765 we did not pursue trying another collection;
766 one could e.g. look at game archives of some Go server.}
768 First, we have created a single pattern vector for each rank, from 30-kyu to 4-dan;
769 we have performed PCA analysis on the pattern vectors, achieving near-perfect
770 rank correspondence in the first PCA dimension%
771 \footnote{The eigenvalue of the second dimension was four times smaller,
772 with no discernible structure revealed within the lower-order eigenvectors.}
773 (figure \ref{fig:strength_pca}).
775 We measure the accuracy of strength approximation by the first dimension
776 using Pearson's $r$ (see \ref{pearson}), yielding quite satisfying value of $r=0.979$
777 implying extremely strong correlation.
778 Using the eigenvector position directly for classification
779 of players within the test group yields MSE TODO, thus providing
780 reasonably satisfying accuracy by itself.
782 To further enhance the strength estimator accuracy,
783 we have tried to train a NN classifier on our train set, consisting
784 of one $(\vec p, {\rm rank})$ pair per player --- we use the pattern vector
785 for activation of input neurons and rank number as result of the output
786 neuron. We then proceeded to test the NN on per-player pattern vectors built
787 from the games in the test set, yielding MSE of TODO with TODO games per player
788 on average.
791 \section{Style Estimator}
793 As a~second case study for our pattern analysis, we investigate pattern vectors $\vec p$
794 of various well-known players and their relationships and correlations
795 with prior expert knowledge. We look for
796 relationships between pattern vectors and perceived ``playing style'' and
797 attempt to use our classifiers to transform pattern vector $\vec p$ to style vector $\vec s$.
799 The source game collection is GoGoD Winter 2008 \cite{GoGoD} containing 55000
800 professional games, dating from the early Go history 1500 years ago to the present.
801 We consider only games of a small subset of players (fig. \ref{fig:style_marks});
802 we have chosen these for being well-known within the players community,
803 having large number of played games in our collection and not playing too long
804 ago.\footnote{Over time, many commonly used sequences get altered, adopted and
805 dismissed; usual playing conditions can also differ significantly.}
807 \subsection{Expert-based knowledge}
808 \label{style-vectors}
809 In order to provide a reference frame for our style analysis,
810 we have gathered some expert-based information about various
811 traditionally perceived style aspects.
812 This expert-based knowledge allows us to predict styles of unknown players based on
813 the similarity of their pattern vectors, as well as discover correlations between
814 styles and proportions of played patterns.
816 Experts were asked to mark each style aspect of the given players
817 on the scale from 1 to 10. The style aspects are defined as shown:
819 %\vspace{4mm}
820 %\noindent
821 \begin{table}
822 \begin{center}
823 \caption{Styles}
824 \begin{tabular}{|c|c|c|}
825 \hline
826 Style & 1 & 10\\ \hline
827 Territoriality $\tau$ & Moyo & Territory \\
828 Orthodoxity $\omega$ & Classic & Novel \\
829 Aggressivity $\alpha$ & Calm & Fighting \\
830 Thickness $\theta$ & Safe & Shinogi \\ \hline
831 \end{tabular}
832 \end{center}
833 \end{table}
834 %\vspace{4mm}
836 Averaging this expert based evaluation yields
837 \emph{reference style vector} $\vec s_r$ (of dimension $4$) for each player $r$
838 from the set of \emph{reference players} $R$.
840 \begin{table}[!t]
841 % increase table row spacing, adjust to taste
842 \renewcommand{\arraystretch}{1.3}
843 \caption{Covariance Measure of Prior Information Dimensions}
844 \label{fig:style_marks_r}
845 \centering
846 % Some packages, such as MDW tools, offer better commands for making tables
847 % than the plain LaTeX2e tabular which is used here.
848 \begin{tabular}{|r||r||r||r||r||r|}
849 \hline
850 & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & year \\
851 \hline
852 $\tau$ &$1.000$&$-0.438$&$-0.581$&$ 0.721$&$ 0.108$\\
853 $\omega$& &$ 1.000$&$ 0.682$&$ 0.014$&$-0.021$\\
854 $\alpha$& & &$ 1.000$&$-0.081$&$ 0.030$\\
855 $\theta$& &\multicolumn{1}{c||}{---}
856 & &$ 1.000$&$-0.073$\\
857 y. & & & & &$ 1.000$\\
858 \hline
859 \end{tabular}
860 \end{table}
862 Three high-level Go players (Alexander Dinerstein 3-pro, Motoki Noguchi
863 7-dan and V\'{i}t Brunner 4-dan) have judged style of the reference
864 players.
865 The complete list of answers is in table \ref{fig:style_marks}.
866 Mean standard deviation of the answers is 0.952,
867 making the data reasonably reliable,
868 though much larger sample would of course be more desirable.
869 We have also found significant correlation between the various
870 style aspects, as shown by the Pearson's $r$ values
871 in table \ref{fig:style_marks_r}.
873 \begin{table}[!t]
874 % increase table row spacing, adjust to taste
875 \renewcommand{\arraystretch}{1.3}
876 \begin{threeparttable}
877 \caption{Expert-Based Style Aspects of Selected Professionals\tnote{1} \tnote{2}}
878 \label{fig:style_marks}
879 \centering
880 % Some packages, such as MDW tools, offer better commands for making tables
881 % than the plain LaTeX2e tabular which is used here.
882 \begin{tabular}{|c||c||c||c||c|}
883 \hline
884 {Player} & $\tau$ & $\omega$ & $\alpha$ & $\theta$ \\
885 \hline
886 Go Seigen\tnote{3} & $6.0 \pm 2.0$ & $9.0 \pm 1.0$ & $8.0 \pm 1.0$ & $5.0 \pm 1.0$ \\
887 Ishida Yoshio\tnote{4}&$8.0 \pm 1.4$ & $5.0 \pm 1.4$ & $3.3 \pm 1.2$ & $5.3 \pm 0.5$ \\
888 Miyazawa Goro & $1.5 \pm 0.5$ & $10 \pm 0 $ & $9.5 \pm 0.5$ & $4.0 \pm 1.0$ \\
889 Yi Ch'ang-ho\tnote{5}& $7.0 \pm 0.8$ & $5.0 \pm 1.4$ & $2.6 \pm 0.9$ & $2.6 \pm 1.2$ \\
890 Sakata Eio & $7.6 \pm 1.7$ & $4.6 \pm 0.5$ & $7.3 \pm 0.9$ & $8.0 \pm 1.6$ \\
891 Fujisawa Hideyuki & $3.5 \pm 0.5$ & $9.0 \pm 1.0$ & $7.0 \pm 0.0$ & $4.0 \pm 0.0$ \\
892 Otake Hideo & $4.3 \pm 0.5$ & $3.0 \pm 0.0$ & $4.6 \pm 1.2$ & $3.6 \pm 0.9$ \\
893 Kato Masao & $2.5 \pm 0.5$ & $4.5 \pm 1.5$ & $9.5 \pm 0.5$ & $4.0 \pm 0.0$ \\
894 Takemiya Masaki & $1.3 \pm 0.5$ & $6.3 \pm 2.1$ & $7.0 \pm 0.8$ & $1.3 \pm 0.5$ \\
895 Kobayashi Koichi & $9.0 \pm 1.0$ & $2.5 \pm 0.5$ & $2.5 \pm 0.5$ & $5.5 \pm 0.5$ \\
896 Cho Chikun & $9.0 \pm 0.8$ & $7.6 \pm 0.9$ & $6.6 \pm 1.2$ & $9.0 \pm 0.8$ \\
897 Ma Xiaochun & $8.0 \pm 2.2$ & $6.3 \pm 0.5$ & $5.6 \pm 1.9$ & $8.0 \pm 0.8$ \\
898 Yoda Norimoto & $6.3 \pm 1.7$ & $4.3 \pm 2.1$ & $4.3 \pm 2.1$ & $3.3 \pm 1.2$ \\
899 Luo Xihe & $7.3 \pm 0.9$ & $7.3 \pm 2.5$ & $7.6 \pm 0.9$ & $6.0 \pm 1.4$ \\
900 O Meien & $2.6 \pm 1.2$ & $9.6 \pm 0.5$ & $8.3 \pm 1.7$ & $3.6 \pm 1.2$ \\
901 Rui Naiwei & $4.6 \pm 1.2$ & $5.6 \pm 0.5$ & $9.0 \pm 0.8$ & $3.3 \pm 1.2$ \\
902 Yuki Satoshi & $3.0 \pm 1.0$ & $8.5 \pm 0.5$ & $9.0 \pm 1.0$ & $4.5 \pm 0.5$ \\
903 Hane Naoki & $7.5 \pm 0.5$ & $2.5 \pm 0.5$ & $4.0 \pm 0.0$ & $4.5 \pm 1.5$ \\
904 Takao Shinji & $5.0 \pm 1.0$ & $3.5 \pm 0.5$ & $5.5 \pm 1.5$ & $4.5 \pm 0.5$ \\
905 Yi Se-tol & $5.3 \pm 0.5$ & $6.6 \pm 2.5$ & $9.3 \pm 0.5$ & $6.6 \pm 1.2$ \\
906 Yamashita Keigo\tnote{4}&$2.0\pm 0.0$& $9.0 \pm 1.0$ & $9.5 \pm 0.5$ & $3.0 \pm 1.0$ \\
907 Cho U & $7.3 \pm 2.4$ & $6.0 \pm 0.8$ & $5.3 \pm 1.7$ & $6.3 \pm 1.7$ \\
908 Gu Li & $5.6 \pm 0.9$ & $7.0 \pm 0.8$ & $9.0 \pm 0.8$ & $4.0 \pm 0.8$ \\
909 Chen Yaoye & $6.0 \pm 1.0$ & $4.0 \pm 1.0$ & $6.0 \pm 1.0$ & $5.5 \pm 0.5$ \\
910 \hline
911 \end{tabular}
912 \begin{tablenotes}
913 \item [1] Including standard deviation. Only players where we got at least two out of three answers are included.
914 \item [2] We consider era as one of factors when finding correlations with pattern vectors; we quantify era by taking median year over all games played by the player. Since this quantity does not fit to the table, we at least sort the players ascending by their median year.
915 \item [3] We do not consider games of Go Seigen due to him playing across several distinct Go-playing eras and thus specifically high diversity of patterns.
916 \item [4] We do not consider games of Ishida Yoshio and Yamashita Keigo for the PCA analysis since they are significant outliers, making high-order dimensions much like purely ``similarity to this player''. Takemiya Masaki has the similar effect for the first dimension, but this corresponds to common knowledge of him being an extreme proponent of anti-territorial (``moyo'') style.
917 \item [5] We consider games only up to year 2004, since Yi Ch'ang-ho was prominent representative of a balanced, careful player until then, but is regarded to have altered his style significantly afterwards.
918 \end{tablenotes}
919 \end{threeparttable}
920 \end{table}
922 \subsection{Style Components Analysis}
924 \begin{figure}[!t]
925 \centering
926 \includegraphics[width=3.75in]{style-pca}
927 \caption{PCA of per-player vectors}
928 \label{fig:style_pca}
929 \end{figure}
931 We have looked at the five most significant dimensions of the pattern data
932 yielded by the PCA analysis of the reference player set%
933 \footnote{We also tried to observe the PCA effect of removing the outlying Takemiya
934 Masaki. In that case, the second dimension strongly
935 correlated to territoriality and the third dimension strongly correlated to era;
936 however, the first dimension remained mysteriously uncorrelated and with no
937 obvious interpretation.}
938 (fig. \ref{fig:style_pca} shows three).
939 We have again computed the Pearson's $r$ for all combinations of PCA dimensions
940 and dimensions of the prior knowledge style vectors to find correlations.
942 \begin{table}[!t]
943 % increase table row spacing, adjust to taste
944 \renewcommand{\arraystretch}{1.3}
945 \caption{Covariance Measure of Patterns and Prior Information}
946 \label{fig:style_r}
947 \centering
948 % Some packages, such as MDW tools, offer better commands for making tables
949 % than the plain LaTeX2e tabular which is used here.
950 \begin{tabular}{|c||c||c||c||c||c|}
951 \hline
952 Eigenval. & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & Year \\
953 \hline
954 0.447 & {\bf -0.530} & 0.323 & 0.298 & {\bf -0.554} & 0.090 \\
955 0.194 & {\bf -0.547} & 0.215 & 0.249 & -0.293 & {\bf -0.630} \\
956 0.046 & 0.131 & -0.002 & -0.128 & 0.242 & {\bf -0.630} \\
957 0.028 & -0.011 & 0.225 & 0.186 & 0.131 & 0.067 \\
958 0.024 & -0.181 & 0.174 & -0.032 & -0.216 & 0.352 \\
959 \hline
960 \end{tabular}
961 \end{table}
963 \begin{table}[!t]
964 % increase table row spacing, adjust to taste
965 \renewcommand{\arraystretch}{1.3}
966 \caption{Characteristic Patterns of PCA Dimensions}
967 \label{fig:style_patterns}
968 \centering
969 % Some packages, such as MDW tools, offer better commands for making tables
970 % than the plain LaTeX2e tabular which is used here.
971 \begin{tabular}{|cccc|}
972 \hline
973 PCA1 top &
974 \begin{psgopartialboard*}{(8,1)(12,6)}
975 \stone[\marktr]{black}{k}{4}
976 \end{psgopartialboard*} &
977 \begin{psgopartialboard*}{(3,1)(5,6)}
978 \stone{white}{d}{3}
979 \stone[\marktr]{black}{d}{5}
980 \end{psgopartialboard*} &
981 \begin{psgopartialboard*}{(5,1)(10,6)}
982 \stone{white}{f}{3}
983 \stone[\marktr]{black}{j}{4}
984 \end{psgopartialboard*} \\
985 $0.447 \cdot$ & $0.274$ & $0.086$ & $0.083$ \\
986 & side extension or \par 4--4 corner opening & high corner approach & high distant pincer \\
987 PCA1 bot. &
988 \begin{psgopartialboard*}{(3,1)(7,6)}
989 \stone{white}{d}{4}
990 \stone[\marktr]{black}{f}{3}
991 \end{psgopartialboard*} &
992 \begin{psgopartialboard*}{(3,1)(7,6)}
993 \stone{white}{c}{6}
994 \stone{black}{d}{4}
995 \stone[\marktr]{black}{f}{3}
996 \end{psgopartialboard*} &
997 \begin{psgopartialboard*}{(3,1)(7,6)}
998 \stone{black}{d}{4}
999 \stone[\marktr]{black}{f}{3}
1000 \end{psgopartialboard*} \\
1001 $0.447 \cdot$ & $-0.399$ & $-0.399$ & $-0.177$ \\
1002 & low corner approach & low corner reply & low corner enclosure \\
1003 \hline
1004 \end{tabular}
1005 \end{table}
1007 It is immediately
1008 obvious both from the measured $r$ and visual observation
1009 that by far the most significant vector corresponds very well
1010 to the player territoriality,\footnote{Cho Chikun, perhaps the best-known
1011 super-territorial player, is not well visible in the cluster, but he is
1012 positioned around $-0.8$ on the first dimension.}
1013 confirming the intuitive notion that this aspect of style
1014 is the one easiest to pin-point and also
1015 most obvious in the played shapes and sequences
1016 (that can obviously aim directly at taking secure territory
1017 or building center-oriented framework). Thick (solid) play also plays
1018 a role, but these two style dimensions have been already shown
1019 to be correlated in prior data.
1021 The other PCA dimensions are not as easy to identify and name, but there
1022 certainly is some influence of the styles on the patterns;
1023 the found correlations are presented in table \ref{fig:style_r}.
1024 (Larger absolute value means better linear correspondence.)
1026 We also list the characteristic spatial patterns of the PCA dimension
1027 extremes (table \ref{fig:style_patterns}) --- however, naive inference
1028 of characteristic patterns based on projection matrix coefficients
1029 does not work well; better methods will have to be researched.%
1030 \footnote{For example, as one of highly ranked ``Takemiya's'' PCA1 patterns,
1031 3,3 corner opening was generated, completely inappropriately;
1032 it reflects some weak ordering in bottom half of the dimension,
1033 not global ordering within the dimension.}
1035 We have not found significant correspondence for the style aspects
1036 representing aggressiveness and novelty of play; this means either
1037 these are not as well defined, the prior information does not represent
1038 them accurately, or we cannot capture them well with our chosen pattern
1039 extraction techniques.
1041 We believe that the next step
1042 in interpreting our results will be more refined prior information input
1043 and precise analysis by Go experts.
1045 Kohonen map view.
1047 \subsection{Style Classification}
1049 %TODO vsude zcheckovat jestli pouzivame stejny cas "we perform, we apply" X "we have performed, ..."
1051 Apart from the PCA-based analysis, we applied neural network (sec. \ref{neural-net})
1052 and $k$-NN classifiers (sec. \ref{knn}).
1054 To compare and evaluate both methods, we have performed $5$-fold cross-validation \cite{TODO} and
1055 compared them with a~random classifier.
1056 In the $5$-fold cross-validation, we randomly divide the training set into $5$ distinct parts with comparable
1057 sizes and then iteratively use each part as a~testing set (yielding square error value), while
1058 the rest (remaining $4$ parts) is taken as a~training set. The square errors across all $5$ iterations are
1059 averaged, yielding mean square error.
1061 The results are shown in table \ref{crossval-cmp}. Second to fifth columns in the table represent
1062 mean square error of different styles (see \ref{style-vectors}), $\mathit{Mean}$ is the
1063 mean square error across the styles and finally, the last column $\mathit{Comp}$
1064 represents $\mathit{Mean}_\mathit{RND} / \mathit{X}$ -- comparison of mean square error (across styles)
1065 with the random classifier. To minimize the
1066 effect of random variables, all numbers were taken as an average of $30$ runs of the cross-validation.
1068 \begin{table}[!t]
1069 \begin{center}
1070 \caption{Comparison of style classifiers}
1071 \label{crossval-cmp}
1072 \begin{tabular}{|c|c|c|c|c|c|c|}
1073 \hline
1074 %Classifier & $\sigma_\tau$ & $\sigma_\omega$ & $\sigma_\alpha$ & $\sigma_\theta$ & Tot $\sigma$ & $\mathit{RndC}$\\ \hline
1075 %Neural network & 0.420 & 0.488 & 0.365 & 0.371 & 0.414 & 1.82 \\
1076 %$k$-NN ($k=4$) & 0.394 & 0.507 & 0.457 & 0.341 & 0.429 & 1.76 \\
1077 %Random classifier & 0.790 & 0.773 & 0.776 & 0.677 & 0.755 & 1.00 \\ \hline
1078 &\multicolumn{5}{|c|}{MSE}& \\ \hline
1079 {Classifier} & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & {\bf Mean} & {\bf Comp}\\ \hline
1080 Neural network & 0.173 & 0.236 & 0.136 & 0.143 & 0.172 & 3.3 \\
1081 $k$-NN ($k=4$) & 0.156 & 0.257 & 0.209 & 0.116 & 0.184 & 3.1\\
1082 Random classifier & 0.544 & 0.640 & 0.647 & 0.458 & 0.572 & 1.0 \\ \hline
1083 \end{tabular}
1084 \end{center}
1085 \end{table}
1087 \subsubsection{Reference (Training) Data}
1088 As a~reference data, we use expert based knowledge presented in section \ref{style-vectors}.
1089 For both methods to yield comparable errors, we have rescaled style vectors to interval $[-1,1]$
1090 (since neural network's activation function has such range).
1092 % TODO presunout konkretni parametry do Appendixu? (neni jich tolik, mozna ne)
1093 \subsubsection{$k$-NN parameters}
1094 We use $k=4$; the weight function is $0.8^{10 \cdot \mathit{EuclideanDistance}}$.
1096 \subsubsection{Neural network's parameters}
1097 The network has $3$ layers with a $23$--$30$--$4$ architecture.
1100 \section{Proposed Applications}
1102 We believe that our findings might be useful for many applications
1103 in the area of Go support software as well as Go-playing computer engines.
1105 The style analysis can be an excellent teaching aid --- classifying style
1106 dimensions based on player's pattern vector, many study recommendations
1107 can be given, e.g. about the professional games to replay, the goal being
1108 balancing understanding of various styles to achieve well-rounded skill set.
1109 This was also our original aim when starting the research and a user-friendly
1110 tool based on our work is now being created.
1112 We hope that more strong players will look into the style dimensions found
1113 by our statistical analysis --- analysis of most played patterns of prospective
1114 opponents might prepare for the game, but we especially hope that new insights
1115 on strategic purposes of various shapes and general human understanding
1116 of the game might be achieved by investigating the style-specific patterns.
1118 Classifying playing strength of a pattern vector of a player can be used
1119 e.g. to help determine initial real-world rating of a player before their
1120 first tournament based on games played on the internet; some players especially
1121 in less populated areas could get fairly strong before playing their first
1122 real tournament.
1124 Analysis of pattern vectors extracted from games of Go-playing programs
1125 in light of the shown strength and style distributions might help to
1126 highlight some weaknesses and room for improvements. (However, since
1127 correlation does not imply causation, simply optimizing Go-playing programs
1128 according to these vectors is unlikely to yield good results.)
1129 Another interesting applications in Go-playing programs might be strength
1130 adjustment; the program can classify the player's level based on the pattern
1131 vector from its previous games and auto-adjust its difficulty settings
1132 accordingly to provide more even games for beginners.
1135 % An example of a floating figure using the graphicx package.
1136 % Note that \label must occur AFTER (or within) \caption.
1137 % For figures, \caption should occur after the \includegraphics.
1138 % Note that IEEEtran v1.7 and later has special internal code that
1139 % is designed to preserve the operation of \label within \caption
1140 % even when the captionsoff option is in effect. However, because
1141 % of issues like this, it may be the safest practice to put all your
1142 % \label just after \caption rather than within \caption{}.
1144 % Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
1145 % option should be used if it is desired that the figures are to be
1146 % displayed while in draft mode.
1148 %\begin{figure}[!t]
1149 %\centering
1150 %\includegraphics[width=2.5in]{myfigure}
1151 % where an .eps filename suffix will be assumed under latex,
1152 % and a .pdf suffix will be assumed for pdflatex; or what has been declared
1153 % via \DeclareGraphicsExtensions.
1154 %\caption{Simulation Results}
1155 %\label{fig_sim}
1156 %\end{figure}
1158 % Note that IEEE typically puts floats only at the top, even when this
1159 % results in a large percentage of a column being occupied by floats.
1162 % An example of a double column floating figure using two subfigures.
1163 % (The subfig.sty package must be loaded for this to work.)
1164 % The subfigure \label commands are set within each subfloat command, the
1165 % \label for the overall figure must come after \caption.
1166 % \hfil must be used as a separator to get equal spacing.
1167 % The subfigure.sty package works much the same way, except \subfigure is
1168 % used instead of \subfloat.
1170 %\begin{figure*}[!t]
1171 %\centerline{\subfloat[Case I]\includegraphics[width=2.5in]{subfigcase1}%
1172 %\label{fig_first_case}}
1173 %\hfil
1174 %\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}%
1175 %\label{fig_second_case}}}
1176 %\caption{Simulation results}
1177 %\label{fig_sim}
1178 %\end{figure*}
1180 % Note that often IEEE papers with subfigures do not employ subfigure
1181 % captions (using the optional argument to \subfloat), but instead will
1182 % reference/describe all of them (a), (b), etc., within the main caption.
1185 % An example of a floating table. Note that, for IEEE style tables, the
1186 % \caption command should come BEFORE the table. Table text will default to
1187 % \footnotesize as IEEE normally uses this smaller font for tables.
1188 % The \label must come after \caption as always.
1190 %\begin{table}[!t]
1191 %% increase table row spacing, adjust to taste
1192 %\renewcommand{\arraystretch}{1.3}
1193 % if using array.sty, it might be a good idea to tweak the value of
1194 % \extrarowheight as needed to properly center the text within the cells
1195 %\caption{An Example of a Table}
1196 %\label{table_example}
1197 %\centering
1198 %% Some packages, such as MDW tools, offer better commands for making tables
1199 %% than the plain LaTeX2e tabular which is used here.
1200 %\begin{tabular}{|c||c|}
1201 %\hline
1202 %One & Two\\
1203 %\hline
1204 %Three & Four\\
1205 %\hline
1206 %\end{tabular}
1207 %\end{table}
1210 % Note that IEEE does not put floats in the very first column - or typically
1211 % anywhere on the first page for that matter. Also, in-text middle ("here")
1212 % positioning is not used. Most IEEE journals use top floats exclusively.
1213 % Note that, LaTeX2e, unlike IEEE journals, places footnotes above bottom
1214 % floats. This can be corrected via the \fnbelowfloat command of the
1215 % stfloats package.
1219 \section{Conclusion}
1220 The conclusion goes here.
1221 We have shown brm and proposed brm.
1223 Since we are not aware of any previous research on this topic and we
1224 are limited by space and time constraints, plenty of research remains
1225 to be done. There is plenty of room for further research in all parts
1226 of our analysis --- different methods of generating the $\vec p$ vectors
1227 can be explored; other data mining methods could be tried.
1228 It can be argued that many players adjust their style according to game conditions
1229 (Go development era, handicap, komi and color, time limits, opponent)
1230 or styles might express differently in various game stages.
1231 More professional players could be consulted on the findings
1232 and for style scales calibration. Impact of handicap games on by-strength
1233 $\vec p$ distribution should be investigated.
1235 TODO: Future research --- Sparse PCA
1240 % if have a single appendix:
1241 %\appendix[Proof of the Zonklar Equations]
1242 % or
1243 %\appendix % for no appendix heading
1244 % do not use \section anymore after \appendix, only \section*
1245 % is possibly needed
1247 % use appendices with more than one appendix
1248 % then use \section to start each appendix
1249 % you must declare a \section before using any
1250 % \subsection or using \label (\appendices by itself
1251 % starts a section numbered zero.)
1255 %\appendices
1256 %\section{Proof of the First Zonklar Equation}
1257 %Appendix one text goes here.
1259 %% you can choose not to have a title for an appendix
1260 %% if you want by leaving the argument blank
1261 %\section{}
1262 %Appendix two text goes here.
1265 % use section* for acknowledgement
1266 \section*{Acknowledgment}
1267 \label{acknowledgement}
1269 We would like to thank Radka ``chidori'' Hane\v{c}kov\'{a} for the original research idea
1270 and X for reviewing our paper.
1271 We appreciate helpful comments on our general methodology
1272 by John Fairbairn, T. M. Hall, Cyril H\"oschl, Robert Jasiek, Franti\v{s}ek Mr\'{a}z
1273 and several GoDiscussions.com users~\cite{GoDiscThread}.
1274 Finally, we are very grateful for detailed input on specific go styles
1275 by Alexander Dinerstein, Motoki Noguchi and V\'{i}t Brunner.
1278 % Can use something like this to put references on a page
1279 % by themselves when using endfloat and the captionsoff option.
1280 \ifCLASSOPTIONcaptionsoff
1281 \newpage
1286 % trigger a \newpage just before the given reference
1287 % number - used to balance the columns on the last page
1288 % adjust value as needed - may need to be readjusted if
1289 % the document is modified later
1290 %\IEEEtriggeratref{8}
1291 % The "triggered" command can be changed if desired:
1292 %\IEEEtriggercmd{\enlargethispage{-5in}}
1294 % references section
1296 % can use a bibliography generated by BibTeX as a .bbl file
1297 % BibTeX documentation can be easily obtained at:
1298 % http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
1299 % The IEEEtran BibTeX style support page is at:
1300 % http://www.michaelshell.org/tex/ieeetran/bibtex/
1301 \bibliographystyle{IEEEtran}
1302 % argument is your BibTeX string definitions and bibliography database(s)
1303 \bibliography{gostyle}
1305 % <OR> manually copy in the resultant .bbl file
1306 % set second argument of \begin to the number of references
1307 % (used to reserve space for the reference number labels box)
1308 %\begin{thebibliography}{1}
1310 %\bibitem{MasterMCTS}
1312 %\end{thebibliography}
1314 % biography section
1316 % If you have an EPS/PDF photo (graphicx package needed) extra braces are
1317 % needed around the contents of the optional argument to biography to prevent
1318 % the LaTeX parser from getting confused when it sees the complicated
1319 % \includegraphics command within an optional argument. (You could create
1320 % your own custom macro containing the \includegraphics command to make things
1321 % simpler here.)
1322 %\begin{biography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{mshell}}]{Michael Shell}
1323 % or if you just want to reserve a space for a photo:
1325 \begin{IEEEbiography}{Michael Shell}
1326 Biography text here.
1327 \end{IEEEbiography}
1329 % if you will not have a photo at all:
1330 \begin{IEEEbiographynophoto}{John Doe}
1331 Biography text here.
1332 \end{IEEEbiographynophoto}
1334 % insert where needed to balance the two columns on the last page with
1335 % biographies
1336 %\newpage
1338 \begin{IEEEbiographynophoto}{Jane Doe}
1339 Biography text here.
1340 \end{IEEEbiographynophoto}
1342 % You can push biographies down or up by placing
1343 % a \vfill before or after them. The appropriate
1344 % use of \vfill depends on what kind of text is
1345 % on the last page and whether or not the columns
1346 % are being equalized.
1348 %\vfill
1350 % Can be used to pull up biographies so that the bottom of the last one
1351 % is flush with the other column.
1352 %\enlargethispage{-5in}
1356 % that's all folks
1357 \end{document}