1 \documentclass[journal]{IEEEtran}
3 \usepackage{cite}
4 % cite.sty was written by Donald Arseneau
5 % V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
6 % \cite{} output to follow that of IEEE. Loading the cite package will
7 % result in citation numbers being automatically sorted and properly
8 % "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
9 % cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
10 % \cite will automatically add leading space, if needed. Use cite.sty's
11 % noadjust option (cite.sty V3.8 and later) if you want to turn this off.
12 % cite.sty is already installed on most LaTeX systems. Be sure and use
13 % version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
14 % not currently provide for hyperlinked citations.
15 % The latest version can be obtained at:
16 % http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
17 % The documentation is contained in the cite.sty file itself.
20 % *** GRAPHICS RELATED PACKAGES ***
22 \ifCLASSINFOpdf
23 % \usepackage[pdftex]{graphicx}
24 % declare the path(s) where your graphic files are
25 % \graphicspath{{../pdf/}{../jpeg/}}
26 % and their extensions so you won't have to specify these with
27 % every instance of \includegraphics
28 % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
29 \else
30 % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
31 % will default to the driver specified in the system graphics.cfg if no
32 % driver is specified.
33 % \usepackage[dvips]{graphicx}
34 \usepackage{graphicx}
35 % declare the path(s) where your graphic files are
36 % \graphicspath{{../eps/}}
37 % and their extensions so you won't have to specify these with
38 % every instance of \includegraphics
39 % \DeclareGraphicsExtensions{.eps}
40 \fi
42 \usepackage{threeparttable}
44 \usepackage{psgo}
45 \setgounit{0.4cm}
47 \usepackage{algorithm}
48 \usepackage{algorithmic}
49 %\usepackage{algpseudocode}
% WICKED: neither one works???
51 % algorithmic.sty can be obtained at:
52 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
53 % There is also a support site at:
54 % http://algorithms.berlios.de/index.html
55 % Also of interest may be the (relatively newer and more customizable)
56 % algorithmicx.sty package by Szasz Janos:
57 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/
59 % *** ALIGNMENT PACKAGES ***
61 %\usepackage{array}
62 % http://www.ctan.org/tex-archive/macros/latex/required/tools/
65 \usepackage{amsmath}
66 %\usepackage{mdwtab}
67 % http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/
70 % IEEEtran contains the IEEEeqnarray family of commands that can be used to
71 % generate multiline equations as well as matrices, tables, etc., of high
72 % quality.
74 %\usepackage{eqparbox}
75 % Also of notable interest is Scott Pakin's eqparbox package for creating
76 % (automatically sized) equal width boxes - aka "natural width parboxes".
77 % Available at:
78 % http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/
82 % *** SUBFIGURE PACKAGES ***
83 %\usepackage[tight,footnotesize]{subfigure}
84 % subfigure.sty was written by Steven Douglas Cochran. This package makes it
85 % easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
86 % work, it is a good idea to load it with the tight package option to reduce
87 % the amount of white space around the subfigures. subfigure.sty is already
88 % installed on most LaTeX systems. The latest version and documentation can
89 % be obtained at:
90 % http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
% subfigure.sty has been superseded by subfig.sty.
95 %\usepackage[caption=false]{caption}
96 %\usepackage[font=footnotesize]{subfig}
97 % subfig.sty, also written by Steven Douglas Cochran, is the modern
98 % replacement for subfigure.sty. However, subfig.sty requires and
99 % automatically loads Axel Sommerfeldt's caption.sty which will override
100 % IEEEtran.cls handling of captions and this will result in nonIEEE style
101 % figure/table captions. To prevent this problem, be sure and preload
% caption.sty with its "caption=false" package option. This will preserve
% IEEEtran.cls handling of captions. Version 1.3 (2005/06/28) and later
104 % (recommended due to many improvements over 1.2) of subfig.sty supports
105 % the caption=false option directly:
106 %\usepackage[caption=false,font=footnotesize]{subfig}
108 % The latest version and documentation can be obtained at:
109 % http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
110 % The latest version and documentation of caption.sty can be obtained at:
111 % http://www.ctan.org/tex-archive/macros/latex/contrib/caption/
115 % *** FLOAT PACKAGES ***
117 %\usepackage{fixltx2e}
118 % fixltx2e, the successor to the earlier fix2col.sty, was written by
119 % Frank Mittelbach and David Carlisle. This package corrects a few problems
120 % in the LaTeX2e kernel, the most notable of which is that in current
121 % LaTeX2e releases, the ordering of single and double column floats is not
122 % guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
123 % single column figure to be placed prior to an earlier double column
124 % figure. The latest version and documentation can be found at:
125 % http://www.ctan.org/tex-archive/macros/latex/base/
129 %\usepackage{stfloats}
130 % stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
131 % the ability to do double column floats at the bottom of the page as well
132 % as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
133 % LaTeX2e). It also provides a command:
134 %\fnbelowfloat
135 % to enable the placement of footnotes below bottom floats (the standard
136 % LaTeX2e kernel puts them above bottom floats). This is an invasive package
137 % which rewrites many portions of the LaTeX2e float routines. It may not work
138 % with other packages that modify the LaTeX2e float routines. The latest
139 % version and documentation can be obtained at:
140 % http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
141 % Documentation is contained in the stfloats.sty comments as well as in the
142 % presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
143 % does not allow \baselineskip to stretch. Authors submitting work to the
144 % IEEE should note that IEEE rarely uses double column equations and
145 % that authors should try to avoid such use. Do not be tempted to use the
146 % cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
147 % not format its papers in such ways.
150 %\ifCLASSOPTIONcaptionsoff
151 % \usepackage[nomarkers]{endfloat}
152 % \let\MYoriglatexcaption\caption
153 % \renewcommand{\caption}[2][\relax]{\MYoriglatexcaption[#2]{#2}}
154 %\fi
155 % endfloat.sty was written by James Darrell McCauley and Jeff Goldberg.
156 % This package may be useful when used in conjunction with IEEEtran.cls'
157 % captionsoff option. Some IEEE journals/societies require that submissions
158 % have lists of figures/tables at the end of the paper and that
159 % figures/tables without any captions are placed on a page by themselves at
160 % the end of the document. If needed, the draftcls IEEEtran class option or
161 % \CLASSINPUTbaselinestretch interface can be used to increase the line
162 % spacing as well. Be sure and use the nomarkers option of endfloat to
163 % prevent endfloat from "marking" where the figures would have been placed
164 % in the text. The two hack lines of code above are a slight modification of
% that suggested in the endfloat docs (section 8.3.1) to ensure that
166 % the full captions always appear in the list of figures/tables - even if
167 % the user used the short optional argument of \caption[]{}.
168 % IEEE papers do not typically make use of \caption[]'s optional argument,
169 % so this should not be an issue. A similar trick can be used to disable
170 % captions of packages such as subfig.sty that lack options to turn off
171 % the subcaptions:
172 % For subfig.sty:
173 % \let\MYorigsubfloat\subfloat
174 % \renewcommand{\subfloat}[2][\relax]{\MYorigsubfloat[]{#2}}
175 % For subfigure.sty:
176 % \let\MYorigsubfigure\subfigure
177 % \renewcommand{\subfigure}[2][\relax]{\MYorigsubfigure[]{#2}}
178 % However, the above trick will not work if both optional arguments of
179 % the \subfloat/subfig command are used. Furthermore, there needs to be a
180 % description of each subfigure *somewhere* and endfloat does not add
181 % subfigure captions to its list of figures. Thus, the best approach is to
182 % avoid the use of subfigure captions (many IEEE journals avoid them anyway)
183 % and instead reference/explain all the subfigures within the main caption.
% The latest version of endfloat.sty and its documentation can be obtained at:
185 % http://www.ctan.org/tex-archive/macros/latex/contrib/endfloat/
187 % The IEEEtran \ifCLASSOPTIONcaptionsoff conditional can also be used
188 % later in the document, say, to conditionally put the References on a
189 % page by themselves.
191 % *** PDF, URL AND HYPERLINK PACKAGES ***
193 \usepackage{url}
194 % url.sty was written by Donald Arseneau. It provides better support for
195 % handling and breaking URLs. url.sty is already installed on most LaTeX
196 % systems. The latest version can be obtained at:
197 % http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
198 % Read the url.sty source comments for usage information. Basically,
199 % \url{my_url_here}.
202 % *** Do not adjust lengths that control margins, column widths, etc. ***
203 % *** Do not use packages that alter fonts (such as pslatex). ***
204 % There should be no need to do such things with IEEEtran.cls V1.6 and later.
205 % (Unless specifically asked to do so by the journal or conference you plan
206 % to submit to, of course. )
208 % correct bad hyphenation here
209 \hyphenation{op-tical net-works semi-conduc-tor know-ledge}
212 \begin{document}
214 % paper title
215 % can use linebreaks \\ within to get better formatting as desired
216 \title{On Move Pattern Trends\\in Large Go Games Corpus}
218 % use \thanks{} to gain access to the first footnote area
219 % a separate \thanks must be used for each paragraph as LaTeX2e's \thanks
220 % was not built to handle multiple paragraphs
221 \author{Petr~Baudi\v{s},~Josef~Moud\v{r}\'{i}k% <-this % stops a space
\thanks{P. Baudi\v{s} is a student at the Faculty of Mathematics and Physics, Charles University, Prague, CZ, and also pursues part of his Computer Go research as an employee of SUSE Labs Prague, Novell CZ.}% <-this % stops a space
\thanks{J. Moud\v{r}\'{i}k is a student at the Faculty of Mathematics and Physics, Charles University, Prague, CZ.}}
225 % note the % following the last \IEEEmembership and also \thanks -
226 % these prevent an unwanted space from occurring between the last author name
227 % and the end of the author line. i.e., if you had this:
229 % \author{....lastname \thanks{...} \thanks{...} }
230 % ^------------^------------^----Do not want these spaces!
232 % a space would be appended to the last name and could cause every name on that
233 % line to be shifted left slightly. This is one of those "LaTeX things". For
234 % instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
235 % "AB" then you have to do: "\textbf{A}\textbf{B}"
236 % \thanks is no different in this regard, so shield the last } of each \thanks
237 % that ends a line with a % and do not let a space in before the next \thanks.
238 % Spaces after \IEEEmembership other than the last one are OK (and needed) as
239 % you are supposed to have spaces between the names. For what it is worth,
240 % this is a minor point as most people would not even notice if the said evil
241 % space somehow managed to creep in.
244 % The paper headers
245 \markboth{Transactions on Computational Intelligence and AI in Games}%
{On Move Pattern Trends in Large Go Games Corpus}
247 % The only time the second header will appear is for the odd numbered pages
248 % after the title page when using the twoside option.
250 % *** Note that you probably will NOT want to include the author's ***
251 % *** name in the headers of peer review papers. ***
252 % You can use \ifCLASSOPTIONpeerreview for conditional compilation here if
253 % you desire.
258 % If you want to put a publisher's ID mark on the page you can do it like
259 % this:
260 %\IEEEpubid{0000--0000/00\$00.00~\copyright~2007 IEEE}
261 % Remember, if you use this you must call \IEEEpubidadjcol in the second
262 % column for its text to clear the IEEEpubid mark.
266 % use for special paper notices
267 %\IEEEspecialpapernotice{(Invited Paper)}
272 % make the title area
273 \maketitle
276 \begin{abstract}
277 %\boldmath
We process a~large corpus of game records of the board game of Go and
propose a~way to extract summary information on the played moves.
We then apply several basic data-mining methods to this summary
information to identify its most differentiating features,
and discuss their correspondence with traditional Go knowledge.
We show mappings of the features to player attributes
such as playing strength or informally perceived ``playing style'' (e.g.
territoriality or aggressivity), and propose applications including
seeding real-world ranks of internet players, aiding in Go study, and
contributing to the Go-theoretical discussion on the scope of ``playing
style''.

291 \end{abstract}
292 % IEEEtran.cls defaults to using nonbold math in the Abstract.
293 % This preserves the distinction between vectors and scalars. However,
294 % if the journal you are submitting to favors bold math in the abstract,
295 % then you can use LaTeX's standard command \boldmath at the very start
296 % of the abstract to achieve this. Many IEEE journals frown on math
297 % in the abstract anyway.
299 % Note that keywords are not normally used for peerreview papers.
300 \begin{IEEEkeywords}
board games, Go, data mining, pattern recognition, player strength, playing style,
302 neural networks, Kohonen maps, principal component analysis
303 \end{IEEEkeywords}
310 % For peer review papers, you can put extra information on the cover
311 % page as needed:
312 % \ifCLASSOPTIONpeerreview
313 % \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
314 % \fi
316 % For peerreview papers, this IEEEtran command inserts a page break and
317 % creates the second title. It will be ignored for other modes.
318 \IEEEpeerreviewmaketitle
322 \section{Introduction}
323 % The very first letter is a 2 line initial drop letter followed
324 % by the rest of the first word in caps.
326 % form to use if the first word consists of a single letter:
327 % \IEEEPARstart{A}{demo} file is ....
329 % form to use if you need the single drop letter followed by
330 % normal text (unknown if ever used by IEEE):
331 % \IEEEPARstart{A}{}demo file is ....
333 % Some journals put the first two words in caps:
334 % \IEEEPARstart{T}{his demo} file is ....
336 % Here we have the typical use of a "T" for an initial drop letter
337 % and "HIS" in caps to complete the first word.
\IEEEPARstart{T}{he} field of Computer Go usually focuses on the problem
of creating a~program to play the game by finding the best move from a~given
board position \cite{GellySilver2008}.
We will make use of one method developed in the course
of such research and apply it to the analysis of existing game records,
aiming instead to help humans play and understand the game better.

346 Go is a~two-player full-information board game played
347 on a~square grid (usually $19\times19$ lines) with black and white
348 stones; the goal of the game is to surround the most territory and
349 capture enemy stones. We assume basic familiarity with the game.
351 Many Go players are eager to play using computers (usually over
352 the internet) and review games played by others on computers as well.
353 This means that large amounts of game records are collected and digitally
354 stored, enabling easy processing of such collections. However, so far
little has been done with the available data --- we are aware
only of uses for simple win/loss statistics \cite{KGSStats,KGSAnalytics,ProGoR}
and ``next move'' statistics on a~specific position \cite{Kombilo,MoyoGo}.

359 We present a~more in-depth approach --- from all played moves, we devise
360 a~compact evaluation of each player. We then explore correlations between
361 evaluations of various players in light of externally given information.
This way, we can discover similarities between the move characteristics of
players with the same playing strength, or discuss the meaning of the
``playing style'' concept on the assumption that similar playing styles
should yield similar move characteristics.
368 \section{Data Extraction}
369 \label{pattern-vectors}
371 As the input of our analysis, we use large collections of game records%
372 \footnote{We use the SGF format \cite{SGF} in our implementation.}
373 grouped by the primary object of analysis (player name, player rank, etc.).
We process the games by object, generating a description for each
played move -- a {\em pattern}, a combination of several
{\em pattern features} described below.

We keep track of the most frequently
occurring patterns, finally composing an $n$-dimensional {\em pattern vector}
$\vec p$ of per-pattern counts for the $n$ globally most frequent patterns%
\footnote{We use $n=500$ in our analysis.}
(the mapping from patterns to vector elements is common to all objects).
We can then process and compare just the pattern vectors.

The pattern vector elements can have diverse values since for each object
we consider a different number of games (and thus patterns).
Therefore, we linearly rescale and normalize the values to the range $[-1,1]$,
the most frequent pattern having the value of $1$ and the least frequently
occurring one being $-1$.%
\footnote{We did not investigate different methods of re-scaling the vectors;
that might be a good way of improving the accuracy of our analysis.}
Thus, we obtain vectors describing the relative frequency of played patterns
independently of the number of gathered patterns.
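
For illustration, the rescaling step can be sketched in a few lines of Python
(a hypothetical helper of ours, not the actual gostyle code; the function name
and data layout are our assumptions):

\begin{verbatim}
def normalize_pattern_vector(counts):
    # Linearly rescale raw per-pattern counts to [-1, 1]:
    # the most frequent pattern maps to 1, the least to -1.
    lo, hi = min(counts), max(counts)
    if hi == lo:   # degenerate: all patterns equally frequent
        return [0.0 for _ in counts]
    return [2.0 * (c - lo) / (hi - lo) - 1.0 for c in counts]

# counts of the n most frequent patterns for one player
print(normalize_pattern_vector([120, 75, 30, 30, 5]))
# -> [1.0, 0.217..., -0.565..., -0.565..., -1.0]
\end{verbatim}
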
395 \subsection{Pattern Features}
When deciding how to compose the patterns we use to describe moves,
we need to consider a specificity tradeoff --- overly general descriptions carry too little
information to discern various player attributes; too specific descriptions
gather too few specimens over the game sample and the vector differences are
not statistically significant.

We have chosen an intuitive and simple approach inspired by the pattern features
used when computing Elo ratings for candidate patterns in Computer Go play
\cite{Elo}. Each pattern is a~combination of several {\em pattern features}
(name--value pairs) matched at the position of the played move.
We use these features:
408 \begin{itemize}
409 \item capture move flag
410 \item atari move flag
411 \item atari escape flag
412 \item contiguity-to-last flag --- whether the move has been played in one of 8 neighbors of the last move
413 \item contiguity-to-second-last flag
414 \item board edge distance --- only up to distance 4
415 \item spatial pattern --- configuration of stones around the played move
416 \end{itemize}
The spatial patterns are normalized (using a dictionary) to be always
black-to-play and invariant under translation and rotation.
420 Configurations of radius between 2 and 9 in the gridcular metric%
421 \footnote{The {\em gridcular} metric
422 $d(x,y) = |\delta x| + |\delta y| + \max(|\delta x|, |\delta y|)$ defines
423 a circle-like structure on the Go board square grid. \cite{SpatPat} }
424 are matched.
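
To illustrate the metric from the footnote, a short Python sketch of the
gridcular distance follows (an illustrative helper of ours, not part of Pachi):

\begin{verbatim}
def gridcular_distance(a, b):
    # d(x, y) = |dx| + |dy| + max(|dx|, |dy|) between two
    # board points a = (ax, ay) and b = (bx, by).
    dx, dy = abs(a[0] - b[0]), abs(a[1] - b[1])
    return dx + dy + max(dx, dy)

# Points within a given distance form a circle-like
# neighborhood on the square grid, e.g.:
print(gridcular_distance((10, 10), (12, 11)))   # -> 5
\end{verbatim}
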
Pattern vectors representing these features contain information on
the played shape as well as a basic representation of tactical dynamics
--- threats to capture stones, replying to the last move, or ignoring the
opponent's move elsewhere to return to an urgent local situation.
The shapes most frequently correspond to opening moves
(either in empty corners and sides, or as part of {\em joseki}
--- commonly played sequences) characteristic of a certain
strategic aim. In the opening, even a single-line difference
in the distance from the border can have a dramatic impact on
further local and global development.
437 \subsection{Implementation}
439 We have implemented the data extraction by making use of the pattern
440 features matching implementation%
441 \footnote{The pattern features matching was developed according
442 to the Elo-rating playing scheme. \cite{Elo}}
443 within the Pachi go-playing program \cite{Pachi}.
We extract information on players by converting the SGF game
records to a GTP stream \cite{GTP} that feeds Pachi's {\tt patternscan}
engine, which outputs a~single {\em patternspec} (a string representation
of the particular pattern feature combination) per move. Of course,
only moves played by the appropriate color in the game are collected.
450 \section{Data Mining}
451 \label{data-mining}
To assess the properties of the gathered \emph{pattern vectors}
and their influence on playing styles,
we have processed the data using a~few basic data mining techniques.

457 The first two methods ({\em analytic}) rely purely on data gathered
458 from the game collection
459 and serve to show internal structure and correlations within the data set.
461 Principal component analysis finds orthogonal vector components that
462 have the largest variance.
463 Reversing the process can indicate which patterns correlate with each component.
Additionally, PCA can be used as a vector-preprocessing step for methods
that are adversely sensitive to correlations among \emph{pattern vector} components.

467 A~second method -- Kohonen maps -- is based on the theory of self-organizing maps
468 of abstract units (neurons) that
469 compete against each other for the representation of the input space.
470 Because neurons in the network are organized in a two-dimensional plane,
471 the trained network virtually spreads vectors to the 2D plane,
472 allowing for simple visualization of clusters of players with similar ``properties''.
Furthermore, we have used two \emph{classification} methods that assign to
each \emph{pattern vector} $\vec P$ some additional data (an \emph{output vector} $\vec O$)
representing e.g.~information about styles, the player's strength, or even the country of origin.
The methods must nonetheless first be calibrated (trained) on some expert or prior knowledge,
usually in the form of pairs of \emph{reference pattern vectors} and their \emph{output vectors}.

Moreover, the reference set can be divided into training and testing pairs
and the methods can be compared by the squared error on the testing data set (the difference
between the \emph{output vectors} approximated by the method and their real desired values).

485 %\footnote{However, note that dicrete characteristics such as country of origin are
486 %not very feasible to use here, since WHAT??? is that even true?? }
The $k$-Nearest Neighbors classifier \cite{CoverHart1967} (the first method)
approximates $\vec O$ by composing the \emph{output vectors}
of the $k$ \emph{reference pattern vectors} closest to $\vec P$.

The other classifier is based on a~multi-layer feed-forward Artificial Neural Network:
the neural network can learn correlations between input and output vectors
and generalize the ``knowledge'' to unknown vectors; it can be more flexible
in the interpretation of different pattern vector elements and discern more
complex relations than the kNN classifier, but it requires e.g.~a larger training sample.
498 \subsection{Principal Component Analysis}
\label{pca}
We use Principal Component Analysis (\emph{PCA}) \cite{Jolliffe1986}
501 to reduce the dimensions of the \emph{pattern vectors} while preserving
502 as much information as possible.
Briefly, PCA is an eigenvalue decomposition of the covariance matrix of the centered \emph{pattern vectors},
producing a~linear mapping from the $n$-dimensional vector space
to a~reduced $m$-dimensional vector space.
507 The $m$ eigenvectors of the original vectors' covariance matrix
508 with the largest eigenvalues are used as the base of the reduced vector space;
509 the eigenvectors form the transformation matrix $W$.
511 For each original \emph{pattern vector} $\vec p_i$,
512 we obtain its new representation $\vec r_i$ in the PCA base
513 as shown in the following equation:
514 \begin{equation}
515 \vec r_i = W \cdot \vec p_i
516 \end{equation}
The whole process is described in Algorithm \ref{alg:pca}.
520 \begin{algorithm}
521 \caption{PCA -- Principal Component Analysis}
522 \begin{algorithmic}
523 \label{alg:pca}
524 \REQUIRE{$m > 0$, set of players $R$ with \emph{pattern vectors} $p_r$}
525 \STATE $\vec \mu \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_r}$
526 \FOR{ $r \in R$}
527 \STATE $\vec p_r \leftarrow \vec p_r - \vec \mu$
528 \ENDFOR
529 \FOR{ $(i,j) \in \{1,... ,n\} \times \{1,... ,n\}$}
530 \STATE $\mathit{Cov}[i,j] \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_{ri} \cdot \vec p_{rj}}$
531 \ENDFOR
532 \STATE Compute Eigenvalue Decomposition of $\mathit{Cov}$ matrix
533 \STATE Get $m$ largest eigenvalues
534 \STATE Most significant eigenvectors ordered by decreasing eigenvalues form the rows of matrix $W$
535 \FOR{ $r \in R$}
536 \STATE $\vec r_r\leftarrow W \vec p_r$
537 \ENDFOR
538 \end{algorithmic}
539 \end{algorithm}
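
For illustration, Algorithm \ref{alg:pca} can be condensed into a few lines of
numpy-based Python; this is only a sketch with our own naming (our actual
implementation relies on the MDP library instead):

\begin{verbatim}
import numpy as np

def pca_transform(P, m):
    # P: (players, n) array of pattern vectors, one per row.
    # Returns rows of P projected onto the m principal
    # components and the (m, n) transformation matrix W.
    mu = P.mean(axis=0)                  # mean pattern vector
    C = np.cov(P - mu, rowvar=False, bias=True)  # n x n
    eigval, eigvec = np.linalg.eigh(C)   # C is symmetric
    order = np.argsort(eigval)[::-1][:m] # m largest eigenvalues
    W = eigvec[:, order].T               # rows = eigenvectors
    return (P - mu) @ W.T, W
\end{verbatim}
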
541 \label{pearson}
We want to find correlations between the PCA dimensions and
some prior knowledge (player rank, style vector).
For this purpose we compute the well-known {\em Pearson product-moment correlation coefficient} \cite{Pearson},
measuring the strength of the linear dependence%
\footnote{A desirable property of the PMCC is that it is invariant to translations and rescaling
of the vectors.}
between the dimensions:

550 $$ r_{X,Y} = {{\rm cov}(X,Y) \over \sigma_X \sigma_Y} $$
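
In practice the coefficient does not need to be implemented by hand; an
illustrative numpy-based sketch (our own helper, not part of the framework)
follows:

\begin{verbatim}
import numpy as np

def pearson_r(x, y):
    # Pearson product-moment correlation of two 1-D sequences.
    x, y = np.asarray(x, float), np.asarray(y, float)
    c = np.cov(x, y, bias=True)[0, 1]
    return float(c / (x.std() * y.std()))

# e.g. first PCA coordinate vs. numeric player rank
print(pearson_r([0.1, 0.4, 0.9], [30, 20, 5]))  # close to -1
\end{verbatim}
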
552 \subsection{Kohonen Maps}
553 \label{koh}
A Kohonen map is a self-organizing network with neurons organized in a~two-dimensional plane.
Neurons in the map compete for the representation of portions of the input vector space.
Each neuron $\vec n$ represents a vector, and the network is trained so that neurons
that are topologically close tend to represent vectors that are close as well.

559 First, a~randomly initialized network is sequentially trained;
560 in each iteration, we choose a~random training vector $\vec t$
and find the neuron $\vec w$ that is closest to $\vec t$ in the Euclidean metric
(we call $\vec w$ the \emph{winner}).

We then adapt the neurons $\vec n$ from the neighbourhood of $\vec w$ using the following equation:
565 \begin{equation}
566 \vec n = \vec n + \alpha \cdot \mathit{Influence}(\vec w, \vec n) \cdot (\vec t - \vec n)
567 \end{equation}
where $\alpha$ is a learning parameter, usually decreasing in time.
$\mathit{Influence}()$ is a function that forces the neurons to spread.
Such a function is usually realised using a Mexican hat function or a difference of Gaussians
(see \cite{TODO} for details).
The state of the network can be evaluated by summing the distances
between each $\vec t \in T$ and its corresponding \emph{winner neuron} $\vec w_t$:
574 \begin{equation}
575 \mathit{Error}(N,T) = \sum_{\vec t \in T}{|\vec w_t - \vec t|}
576 \end{equation}
579 \begin{algorithm}
580 \caption{Kohonen maps -- training}
581 \begin{algorithmic}
582 \label{alg:koh}
583 \REQUIRE{Set of training vectors $T$, input dimension $D$}
584 \REQUIRE{max number of iterations $M$, desired error $E$}
\STATE $N \leftarrow \{\vec n \mid \vec n$ random, $\mathit{dim}(\vec n) = D\}$
\STATE $\mathit{It} \leftarrow 0$
\REPEAT
\STATE $\mathit{It} \leftarrow \mathit{It} + 1$
\STATE $\vec t \leftarrow \mathit{PickRandom}(T)$
\FORALL{$\vec n \in N$}
\STATE $\mathit{Dist}[\vec n] \leftarrow \mathit{EuclideanDistance}(\vec n, \vec t)$
\ENDFOR
\STATE Find $\vec w \in N$ so that $\mathit{Dist}[\vec w] \leq \mathit{Dist}[\vec m], \forall \vec m \in N$
\FORALL{$\vec n \in \mathit{TopologicalNeighbors}(N, \vec w)$}
594 \STATE $\vec n \leftarrow \vec n + \alpha(It) \cdot \mathit{Influence}(\vec w, \vec n) \cdot ( \vec t - \vec n ) $
595 \ENDFOR
596 \UNTIL{$\mathit{Error}(N, T) < E$ or $ \mathit{It} > M$}
597 \end{algorithmic}
598 \end{algorithm}
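
A minimal Python sketch of one adaptation step of Algorithm \ref{alg:koh} is
shown below; the Gaussian neighborhood function and the parameter values are
illustrative assumptions of ours, not the exact settings used in our experiments:

\begin{verbatim}
import numpy as np

def som_step(neurons, grid, t, alpha=0.1, sigma=1.0):
    # One training step of a Kohonen map.
    # neurons: (k, D) float array of neuron weight vectors,
    # grid:    (k, 2) array of fixed 2-D map coordinates,
    # t:       (D,)   training vector.
    winner = np.argmin(np.linalg.norm(neurons - t, axis=1))
    # Gaussian neighborhood influence around the winner
    d = np.linalg.norm(grid - grid[winner], axis=1)
    influence = np.exp(-(d ** 2) / (2 * sigma ** 2))
    neurons += alpha * influence[:, None] * (t - neurons)
    return neurons
\end{verbatim}
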
601 \subsection{k-nearest Neighbors Classifier}
602 \label{knn}
Our goal is to approximate the player's \emph{output vector} $\vec O$ given their \emph{pattern vector} $\vec P$.
We further assume that similarities in players' \emph{pattern vectors}
uniformly correlate with similarities in players' \emph{output vectors}.

We require a set of reference players $R$ with known \emph{pattern vectors} $\vec p_r$
and \emph{output vectors} $\vec o_r$.

$\vec O$ is approximated as a~weighted average of the \emph{output vectors}
$\vec o_i$ of the $k$ players with \emph{pattern vectors} $\vec p_i$ closest to $\vec P$.
This is illustrated in Algorithm \ref{alg:knn}.
Note that the weight is a function of distance and is not explicitly defined in Algorithm \ref{alg:knn};
during our research, an exponentially decreasing weight has proven sufficient.
616 \begin{algorithm}
617 \caption{k-Nearest Neighbors}
618 \begin{algorithmic}
619 \label{alg:knn}
620 \REQUIRE{pattern vector $\vec P$, $k > 0$, set of reference players $R$}
621 \FORALL{$r \in R$ }
622 \STATE $D[r] \leftarrow \mathit{EuclideanDistance}(\vec p_r, \vec P)$
623 \ENDFOR
624 \STATE $N \leftarrow \mathit{SelectSmallest}(k, R, D)$
625 \STATE $\vec O \leftarrow \vec 0$
626 \FORALL{$r \in N $}
627 \STATE $\vec O \leftarrow \vec O + \mathit{Weight}(D[r]) \cdot \vec o_r $
628 \ENDFOR
629 \end{algorithmic}
630 \end{algorithm}
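
For concreteness, a small Python sketch of Algorithm \ref{alg:knn} with an
exponentially decreasing weight follows; the decay constant and the
normalization by the total weight (turning the weighted sum into a proper
weighted average) are our illustrative assumptions:

\begin{verbatim}
import numpy as np

def knn_estimate(P, ref_patterns, ref_outputs, k=5, decay=10.0):
    # Approximate the output vector for pattern vector P from
    # the k reference players closest to P.
    # ref_patterns: (r, n) array, ref_outputs: (r, d) array.
    dist = np.linalg.norm(ref_patterns - P, axis=1)
    nearest = np.argsort(dist)[:k]
    w = np.exp(-decay * dist[nearest])  # exponential decay
    w /= w.sum()                        # weighted average
    return w @ ref_outputs[nearest]
\end{verbatim}
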
632 \subsection{Neural Network Classifier}
633 \label{neural-net}
Feedforward neural networks \cite{TODO} are known for their ability to generalize
and to find correlations and patterns between input and output data, acting as a classifier.

638 Before use, the network is iteratively trained on the training data
639 (again consisting of pairs of \emph{pattern vectors} as input and \emph{output vectors})
640 until the error on the training set is reasonably small.
642 %Neural network is an adaptive system that must undergo a training
643 %period similarly to the requirement
644 %of reference vectors for the k-Nearest Neighbors algorithm above.
646 \subsubsection{Computation and activation of the NN}
647 Technically, the neural network is a network of interconnected computational units called neurons.
648 A feedforward neural network has a layered topology;
649 it usually has one \emph{input layer}, one \emph{output layer}
650 and an arbitrary number of \emph{hidden layers} between.
Each neuron $i$ is connected to all neurons in the previous layer and each connection has its weight $w_{ij}$.

654 The computation proceeds in discrete time steps.
655 In the first step, the neurons in the \emph{input layer}
656 are \emph{activated} according to the \emph{input vector}.
657 Then, we iteratively compute output of each neuron in the next layer
658 until the output layer is reached.
The activity of the output layer is then presented as the result.

661 The activation $y_i$ of neuron $i$ from the layer $I$ is computed as
662 \begin{equation}
663 y_i = f\left(\sum_{j \in J}{w_{ij} y_j}\right)
664 \end{equation}
where $J$ is the previous layer and $y_j$ is the activation of neuron $j$ from layer $J$.
666 Function $f()$ is a~so-called \emph{activation function}
667 and its purpose is to bound the outputs of neurons.
668 A typical example of an activation function is the sigmoid function.%
669 \footnote{A special case of the logistic function, defined by the formula
670 $\sigma(x)=\frac{1}{1+e^{-(rx+k)}}$; parameters control the growth rate ($r$)
671 and the x-position ($k$).}
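
The layer-by-layer computation can be sketched in a few lines of Python
(illustrative only; our implementation relies on libfann instead):

\begin{verbatim}
import numpy as np

def sigmoid(x, r=1.0, k=0.0):
    # Logistic activation bounding neuron outputs to (0, 1).
    return 1.0 / (1.0 + np.exp(-(r * x + k)))

def feed_forward(weights, input_vector):
    # weights: list of per-layer matrices of shape
    # (neurons in layer I, neurons in previous layer).
    y = np.asarray(input_vector, float)
    for W in weights:
        y = sigmoid(W @ y)   # y_i = f(sum_j w_ij * y_j)
    return y
\end{verbatim}
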
673 \subsubsection{Training}
The training of the feed-forward neural network usually involves some
modification of the supervised backpropagation learning algorithm \cite{TODO}.
We use the first-order optimization algorithm RPROP \cite{Riedmiller1993}.

678 %Because the \emph{reference set} is usually not very large,
679 %we have devised a simple method for its extension.
680 %This enhancement is based upon adding random linear combinations
681 %of \emph{style and pattern vectors} to the training set.
As outlined above, the training set $T$ consists of pairs of
input vectors (\emph{pattern vectors} $\vec p_i$) and
desired \emph{output vectors} $\vec o_i$.

687 The training algorithm is shown in Algorithm \ref{alg:tnn}.
689 \begin{algorithm}
690 \caption{Training Neural Network}
691 \begin{algorithmic}
692 \label{alg:tnn}
693 \REQUIRE{Train set $T$, desired error $e$, max iterations $M$}
694 \STATE $N \leftarrow \mathit{RandomlyInitializedNetwork}()$
695 \STATE $\mathit{It} \leftarrow 0$
696 \REPEAT
697 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
698 \STATE $\Delta \vec w \leftarrow \vec 0$
699 \STATE $\mathit{TotalError} \leftarrow 0$
700 %\FORALL{$(\overrightarrow{Input}, \overrightarrow{DesiredOutput}) \in T$}
701 %\STATE $\overrightarrow{Output} \leftarrow Result(N, \overrightarrow{Input})$
702 %\STATE $E \leftarrow |\overrightarrow{DesiredOutput} - \overrightarrow{Output}|$
703 \FORALL{$(\mathit{Input}, \mathit{DesiredOutput}) \in T$}
704 \STATE $\mathit{Output} \leftarrow \mathit{Result}(N, \mathit{Input})$
705 \STATE $\mathit{Error} \leftarrow |\mathit{DesiredOutput} - \mathit{Output}|$
706 \STATE $\Delta \vec w \leftarrow \Delta \vec w + \mathit{WeightUpdate}(N,\mathit{Error})$
707 \STATE $\mathit{TotalError} \leftarrow \mathit{TotalError} + \mathit{Error}$
708 \ENDFOR
709 \STATE $N \leftarrow \mathit{ModifyWeights}(N, \Delta \vec w)$
710 \UNTIL{$\mathit{TotalError} < e$ or $ \mathit{It} > M$}
711 \end{algorithmic}
712 \end{algorithm}
714 \subsubsection{Architecture details}
TODO: number of layers, number of neurons, ...
TODO: does this even belong here? Probably not.
718 \subsection{Implementation}
720 We have implemented the data mining methods as an open-source framework ``gostyle'' \cite{TODO},
721 made available under the GNU GPL licence.
The majority of our basic processing and analysis is implemented in the Python \cite{Python2005} programming language.

Nonetheless, we use a number of external libraries, such as the MDP library \cite{MDP} (used for PCA)
and the Kohonen library \cite{KohonenPy}.
The neural network part of the project is written using the excellent libfann C library \cite{Nissen2003}.
730 \section{Strength Estimator}
732 \begin{figure*}[!t]
733 \centering
734 \includegraphics[width=7in]{strength-pca}
735 \caption{PCA of by-strength vectors}
736 \label{fig:strength_pca}
737 \end{figure*}
First, we have used our framework to analyse correlations of pattern vectors
and playing strength. As in other competitively played board games, Go players
receive a real-world rating based on tournament games, and a rank based on their
rating.\footnote{An Elo-like rating system \cite{GoR} is usually used,
corresponding to even win chances for a game of two players with the same rank,
and about a 2:3 win chance for the stronger player in the case of a one-rank difference.}%
\footnote{Professional ranks and dan ranks in some Asian countries may
be assigned differently.} The amateur ranks range from 30kyu (beginner) to
1kyu (intermediate), followed by 1dan to 7dan (9dan in some systems;
top-level player). Multiple independent real-world ranking scales exist
(geographically based) and online servers maintain their own user rankings;
the differences can be up to several stones.

As the source game collection, we use the Go Teaching Ladder
reviews\footnote{The reviews contain comments and variations --- we consider only the actual played game.}
\cite{GTL} --- this collection contains 7700 games of players with strength ranging
from 30k to 4d; we consider only even games with clear rank information, and then
randomly separate 770 games as a testing set. Since the rank information is provided
by the users and may not be consistent, we are forced to take a simplified look
at the ranks, discarding the differences between various systems and thus increasing
the error in our model.\footnote{Since
our results seem satisfying, we did not pursue another collection;
one could e.g.~look at the game archives of some Go server.}

First, we have created a single pattern vector for each rank, from 30k to 4d;
we have performed PCA on these pattern vectors, achieving near-perfect
rank correspondence in the first PCA dimension%
\footnote{The eigenvalue of the second dimension was four times smaller,
with no discernible structure revealed within the lower-order eigenvectors.}
(figure \ref{fig:strength_pca}).

We measure the accuracy of the strength approximation by the first dimension
using Pearson's $r$ (see \ref{pearson}), yielding a satisfying value of $r=0.979$.
Using the eigenvector position directly for classification
of players within the test group yields MSE TODO, thus providing
reasonably satisfying accuracy.
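
Such a naive classifier can be realized by a simple least-squares fit of the
numeric rank against the first PCA coordinate; the sketch below is illustrative
only, and the numeric rank encoding is our assumption:

\begin{verbatim}
import numpy as np

def fit_rank_line(first_coord, ranks):
    # Least-squares line mapping the first PCA coordinate to
    # a numeric rank (e.g. 30k..1k as -30..-1, 1d..4d as 1..4).
    a, b = np.polyfit(first_coord, ranks, 1)
    return lambda x: a * np.asarray(x) + b

# estimator = fit_rank_line(train_coords, train_ranks)
# mse = np.mean((estimator(test_coords) - test_ranks) ** 2)
\end{verbatim}
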
To further enhance the strength estimator accuracy,
we have tried to train an NN classifier on our training set, consisting
of one $(\vec p, {\rm rank})$ pair per player --- we use the pattern vector
for the activation of the input neurons and the rank number as the result of the output
neuron. We then proceeded to test the NN on per-player pattern vectors built
from the games in the test set, yielding an MSE of TODO with TODO games per player
on average.
785 \section{Style Estimator}
As a second case study for our pattern analysis, we investigate the pattern vectors $\vec p$
of various well-known players and their relationship to prior expert
knowledge about those players. We look for a
relationship between the pattern vectors and the perceived ``playing style'' and
attempt to use our classifiers to transform a pattern vector $\vec p$ into a style vector $\vec s$.

The source game collection is GoGoD Winter 2008 \cite{GoGoD}, containing 55000
professional games dating from the early Go history 1500 years ago to the present.
We consider only games of a small subset of players (fig. \ref{fig:style_marks});
we have chosen these for being well-known within the player community,
having a large number of games in our collection, and not playing too long
ago.\footnote{Over time, many commonly used sequences get altered, adopted and
dismissed; usual playing conditions can also differ significantly.}
801 \subsection{Expert-based knowledge}
802 \label{style-vectors}
803 In order to provide a reference frame for our style analysis,
804 we have gathered some expert-based information about various
805 traditionally perceived style aspects.
806 This expert-based knowledge allows us to predict styles of unknown players based on
807 the similarity of their pattern vectors, as well as discover correlations between
808 styles and proportions of played patterns.
Experts were asked to mark each style aspect of the given players
on a scale from 1 to 10. The style aspects are defined as follows:

813 \vspace{4mm}
814 \noindent
815 %\begin{table}
816 \begin{center}
817 %\caption{Styles}
818 \begin{tabular}{|c|c|c|}
819 \hline
820 \multicolumn{3}{|c|}{Styles} \\ \hline
821 Style & 1 & 10\\ \hline
822 Territoriality $\tau$ & Moyo & Territory \\
823 Orthodoxity $\omega$ & Classic & Novel \\
Aggressivity $\alpha$ & Calm & Fighting \\
825 Thickness $\theta$ & Safe & Shinogi \\ \hline
826 \end{tabular}
827 \end{center}
828 %\end{table}
829 \vspace{4mm}
Averaging this expert-based evaluation yields a
\emph{reference style vector} $\vec s_r$ (of dimension $4$) for each player $r$
from the set of \emph{reference players} $R$.
835 \begin{table}[!t]
836 % increase table row spacing, adjust to taste
837 \renewcommand{\arraystretch}{1.3}
\caption{Correlation (Pearson's $r$) of Prior Information Dimensions}
839 \label{fig:style_marks_r}
840 \centering
841 % Some packages, such as MDW tools, offer better commands for making tables
842 % than the plain LaTeX2e tabular which is used here.
843 \begin{tabular}{|r||r||r||r||r||r|}
844 \hline
845 & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & year \\
846 \hline
847 $\tau$ &$1.000$&$-0.438$&$-0.581$&$ 0.721$&$ 0.108$\\
848 $\omega$& &$ 1.000$&$ 0.682$&$ 0.014$&$-0.021$\\
849 $\alpha$& & &$ 1.000$&$-0.081$&$ 0.030$\\
850 $\theta$& &\multicolumn{1}{c||}{---}
851 & &$ 1.000$&$-0.073$\\
852 y. & & & & &$ 1.000$\\
853 \hline
854 \end{tabular}
855 \end{table}
Three high-level Go players (Alexander Dinerstein 3-pro, Motoki Noguchi
7-dan and V\'{i}t Brunner 4-dan) have judged the styles of the reference
players.
The complete list of answers is in table \ref{fig:style_marks}.
The mean standard deviation of the answers is 0.952,
making the data reasonably reliable,
though a much larger sample would of course be more desirable.
We have also found significant correlations between the various
style aspects, as shown by the Pearson's $r$ values
in table \ref{fig:style_marks_r}.
868 \begin{table}[!t]
869 % increase table row spacing, adjust to taste
870 \renewcommand{\arraystretch}{1.3}
871 \begin{threeparttable}
872 \caption{Expert-Based Style Aspects of Selected Professionals\tnote{1} \tnote{2}}
873 \label{fig:style_marks}
874 \centering
875 % Some packages, such as MDW tools, offer better commands for making tables
876 % than the plain LaTeX2e tabular which is used here.
877 \begin{tabular}{|c||c||c||c||c|}
878 \hline
879 Player & $\tau$ & $\omega$ & $\alpha$ & $\theta$ \\
880 \hline
881 Go Seigen\tnote{3} & $6.0 \pm 2.0$ & $9.0 \pm 1.0$ & $8.0 \pm 1.0$ & $5.0 \pm 1.0$ \\
882 Ishida Yoshio\tnote{4}&$8.0 \pm 1.4$ & $5.0 \pm 1.4$ & $3.3 \pm 1.2$ & $5.3 \pm 0.5$ \\
883 Miyazawa Goro & $1.5 \pm 0.5$ & $10 \pm 0 $ & $9.5 \pm 0.5$ & $4.0 \pm 1.0$ \\
884 Yi Ch'ang-ho\tnote{5}& $7.0 \pm 0.8$ & $5.0 \pm 1.4$ & $2.6 \pm 0.9$ & $2.6 \pm 1.2$ \\
885 Sakata Eio & $7.6 \pm 1.7$ & $4.6 \pm 0.5$ & $7.3 \pm 0.9$ & $8.0 \pm 1.6$ \\
886 Fujisawa Hideyuki & $3.5 \pm 0.5$ & $9.0 \pm 1.0$ & $7.0 \pm 0.0$ & $4.0 \pm 0.0$ \\
887 Otake Hideo & $4.3 \pm 0.5$ & $3.0 \pm 0.0$ & $4.6 \pm 1.2$ & $3.6 \pm 0.9$ \\
888 Kato Masao & $2.5 \pm 0.5$ & $4.5 \pm 1.5$ & $9.5 \pm 0.5$ & $4.0 \pm 0.0$ \\
889 Takemiya Masaki & $1.3 \pm 0.5$ & $6.3 \pm 2.1$ & $7.0 \pm 0.8$ & $1.3 \pm 0.5$ \\
890 Kobayashi Koichi & $9.0 \pm 1.0$ & $2.5 \pm 0.5$ & $2.5 \pm 0.5$ & $5.5 \pm 0.5$ \\
891 Cho Chikun & $9.0 \pm 0.8$ & $7.6 \pm 0.9$ & $6.6 \pm 1.2$ & $9.0 \pm 0.8$ \\
892 Ma Xiaochun & $8.0 \pm 2.2$ & $6.3 \pm 0.5$ & $5.6 \pm 1.9$ & $8.0 \pm 0.8$ \\
893 Yoda Norimoto & $6.3 \pm 1.7$ & $4.3 \pm 2.1$ & $4.3 \pm 2.1$ & $3.3 \pm 1.2$ \\
894 Luo Xihe & $7.3 \pm 0.9$ & $7.3 \pm 2.5$ & $7.6 \pm 0.9$ & $6.0 \pm 1.4$ \\
895 O Meien & $2.6 \pm 1.2$ & $9.6 \pm 0.5$ & $8.3 \pm 1.7$ & $3.6 \pm 1.2$ \\
896 Rui Naiwei & $4.6 \pm 1.2$ & $5.6 \pm 0.5$ & $9.0 \pm 0.8$ & $3.3 \pm 1.2$ \\
897 Yuki Satoshi & $3.0 \pm 1.0$ & $8.5 \pm 0.5$ & $9.0 \pm 1.0$ & $4.5 \pm 0.5$ \\
898 Hane Naoki & $7.5 \pm 0.5$ & $2.5 \pm 0.5$ & $4.0 \pm 0.0$ & $4.5 \pm 1.5$ \\
899 Takao Shinji & $5.0 \pm 1.0$ & $3.5 \pm 0.5$ & $5.5 \pm 1.5$ & $4.5 \pm 0.5$ \\
900 Yi Se-tol & $5.3 \pm 0.5$ & $6.6 \pm 2.5$ & $9.3 \pm 0.5$ & $6.6 \pm 1.2$ \\
901 Yamashita Keigo\tnote{4}&$2.0\pm 0.0$& $9.0 \pm 1.0$ & $9.5 \pm 0.5$ & $3.0 \pm 1.0$ \\
902 Cho U & $7.3 \pm 2.4$ & $6.0 \pm 0.8$ & $5.3 \pm 1.7$ & $6.3 \pm 1.7$ \\
903 Gu Li & $5.6 \pm 0.9$ & $7.0 \pm 0.8$ & $9.0 \pm 0.8$ & $4.0 \pm 0.8$ \\
904 Chen Yaoye & $6.0 \pm 1.0$ & $4.0 \pm 1.0$ & $6.0 \pm 1.0$ & $5.5 \pm 0.5$ \\
905 \hline
906 \end{tabular}
907 \begin{tablenotes}
\item [1] Including standard deviation. Only players for whom we got at least two out of three answers are included.
\item [2] We consider era as one of the factors when finding correlations with pattern vectors; we quantify era by taking the median year over all games played by the player. Since this quantity does not fit into the table, we at least sort the players in ascending order of their median year.
\item [3] We do not consider games of Go Seigen due to his playing across several distinct Go-playing eras and the resulting unusually high diversity of patterns.
\item [4] We do not consider games of Ishida Yoshio and Yamashita Keigo for the PCA analysis since they are significant outliers, making the high-order dimensions behave much like pure ``similarity to this player'' measures. Takemiya Masaki has a similar effect on the first dimension, but this corresponds to the common knowledge of him being an extreme proponent of the anti-territorial (``moyo'') style.
\item [5] We consider games only up to the year 2004, since Yi Ch'ang-ho was a prominent representative of a balanced, careful style until then, but is regarded to have altered his style significantly afterwards.
913 \end{tablenotes}
914 \end{threeparttable}
915 \end{table}
917 \subsection{Style Components Analysis}
919 \begin{figure}[!t]
920 \centering
921 \includegraphics[width=3.75in]{style-pca}
922 \caption{PCA of per-player vectors}
923 \label{fig:style_pca}
924 \end{figure}
We have looked at the five most significant dimensions of the pattern data
yielded by the PCA analysis of the reference player set%
\footnote{We also tried to observe the effect on the PCA of removing the outlying Takemiya
Masaki. The second dimension then strongly
correlated with territoriality and the third dimension strongly correlated with era;
however, the first dimension remained mysteriously uncorrelated, with no
obvious interpretation.}
(fig. \ref{fig:style_pca} shows three of them).
We have again computed Pearson's $r$ for all combinations of PCA dimensions
and dimensions of the prior-knowledge style vectors to find correlations.

937 \begin{table}[!t]
938 % increase table row spacing, adjust to taste
939 \renewcommand{\arraystretch}{1.3}
\caption{Correlation (Pearson's $r$) of Patterns and Prior Information}
941 \label{fig:style_r}
942 \centering
943 % Some packages, such as MDW tools, offer better commands for making tables
944 % than the plain LaTeX2e tabular which is used here.
945 \begin{tabular}{|c||c||c||c||c||c|}
946 \hline
947 Eigenval. & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & Year \\
948 \hline
949 0.447 & {\bf -0.530} & 0.323 & 0.298 & {\bf -0.554} & 0.090 \\
950 0.194 & {\bf -0.547} & 0.215 & 0.249 & -0.293 & {\bf -0.630} \\
951 0.046 & 0.131 & -0.002 & -0.128 & 0.242 & {\bf -0.630} \\
952 0.028 & -0.011 & 0.225 & 0.186 & 0.131 & 0.067 \\
953 0.024 & -0.181 & 0.174 & -0.032 & -0.216 & 0.352 \\
954 \hline
955 \end{tabular}
956 \end{table}
958 \begin{table}[!t]
959 % increase table row spacing, adjust to taste
960 \renewcommand{\arraystretch}{1.3}
961 \caption{Characteristic Patterns of PCA Dimensions}
\label{fig:style_patterns}
963 \centering
964 % Some packages, such as MDW tools, offer better commands for making tables
965 % than the plain LaTeX2e tabular which is used here.
966 \begin{tabular}{|cccc|}
967 \hline
968 PCA1 top &
969 \begin{psgopartialboard*}{(8,1)(12,6)}
970 \stone[\marktr]{black}{k}{4}
971 \end{psgopartialboard*} &
972 \begin{psgopartialboard*}{(3,1)(5,6)}
973 \stone{white}{d}{3}
974 \stone[\marktr]{black}{d}{5}
975 \end{psgopartialboard*} &
976 \begin{psgopartialboard*}{(5,1)(10,6)}
977 \stone{white}{f}{3}
978 \stone[\marktr]{black}{j}{4}
979 \end{psgopartialboard*} \\
980 $0.447 \cdot$ & $0.274$ & $0.086$ & $0.083$ \\
981 & side extension or \par 4--4 corner opening & high corner approach & high distant pincer \\
982 PCA1 bot. &
983 \begin{psgopartialboard*}{(3,1)(7,6)}
984 \stone{white}{d}{4}
985 \stone[\marktr]{black}{f}{3}
986 \end{psgopartialboard*} &
987 \begin{psgopartialboard*}{(3,1)(7,6)}
988 \stone{white}{c}{6}
989 \stone{black}{d}{4}
990 \stone[\marktr]{black}{f}{3}
991 \end{psgopartialboard*} &
992 \begin{psgopartialboard*}{(3,1)(7,6)}
993 \stone{black}{d}{4}
994 \stone[\marktr]{black}{f}{3}
995 \end{psgopartialboard*} \\
996 $0.447 \cdot$ & $-0.399$ & $-0.399$ & $-0.177$ \\
997 & low corner approach & low corner reply & low corner enclosure \\
998 \hline
999 \end{tabular}
1000 \end{table}
It is immediately
obvious, both from the measured $r$ and from visual observation,
that by far the most significant vector corresponds very well
to the players' territoriality,\footnote{Cho Chikun, perhaps the best-known
super-territorial player, is not well visible in the cluster, but he is
positioned around $-0.8$ on the first dimension.}
confirming the intuitive notion that this aspect of style
is the one easiest to pin-point and also
the most obvious in the played shapes and sequences
(which can obviously aim directly at taking secure territory
or at building a center-oriented framework). Thick (solid) play also plays
a role, but these two style dimensions have already been shown
to be correlated in the prior data.

The other PCA dimensions do not correspond to anything as easy to identify and name, but there
certainly is some influence of the styles on the patterns;
the correlations found are presented in table \ref{fig:style_r}.
(A larger absolute value means a better linear correspondence.)

We also list the characteristic spatial patterns of the PCA dimension
extremes (table \ref{fig:style_patterns}) --- however, naive inference
of characteristic patterns based on projection matrix coefficients
does not work well; better methods will have to be researched.%
\footnote{For example, the 3-3 corner opening was generated as one of the highly
ranked ``Takemiya'' PCA1 patterns, which is completely inappropriate;
it reflects some weak ordering in the bottom half of the dimension,
not a global ordering within the dimension.}

We have not found significant correspondence for the style aspects
representing aggressiveness and novelty of play; this means that either
these are not as well defined, the prior information does not represent
them accurately, or we cannot capture them well with our chosen pattern
extraction techniques.

1036 We believe that the next step
1037 in interpreting our results will be more refined prior information input
1038 and precise analysis by Go experts.
TODO: Kohonen map view.
1042 \subsection{Style Classification}
The naive PCA-based classifier had MSE xyzzy.

The kNN-based classifier in pattern space had MSE brm.

We then tried to apply the NN classifier with a linear output function to the dataset,
which yielded Y (see fig. Z), with MSE abcd.
1052 \section{Proposed Applications}
1054 We believe that our findings might be useful for many applications
1055 in the area of Go support software as well as Go-playing computer engines.
The style analysis can be an excellent teaching aid --- by classifying the style
dimensions based on a player's pattern vector, many study recommendations
can be given, e.g.~which professional games to replay, the goal being to
balance the understanding of various styles to achieve a well-rounded skill set.
This was also our original aim when starting the research, and a user-friendly
tool based on our work is now being created.

We hope that more strong players will look into the style dimensions found
by our statistical analysis --- analysis of the most played patterns of prospective
opponents might help to prepare for a game, but we especially hope that new insights
into the strategic purposes of various shapes, and a better general human understanding
of the game, might be achieved by investigating the style-specific patterns.

Classifying the playing strength of a player from their pattern vector can be used
e.g.~to help determine the initial real-world rating of a player before their
first tournament, based on games played on the internet; some players, especially
in less populated areas, could get fairly strong before playing their first
real tournament.

1076 Analysis of pattern vectors extracted from games of Go-playing programs
1077 in light of the shown strength and style distributions might help to
1078 highlight some weaknesses and room for improvements. (However, since
1079 correlation does not imply causation, simply optimizing Go-playing programs
1080 according to these vectors is unlikely to yield good results.)
Another interesting application in Go-playing programs might be strength
adjustment; the program can classify the player's level based on the pattern
vector from their previous games and auto-adjust its difficulty settings
accordingly to provide more even games for beginners.
1087 % An example of a floating figure using the graphicx package.
1088 % Note that \label must occur AFTER (or within) \caption.
1089 % For figures, \caption should occur after the \includegraphics.
1090 % Note that IEEEtran v1.7 and later has special internal code that
1091 % is designed to preserve the operation of \label within \caption
1092 % even when the captionsoff option is in effect. However, because
1093 % of issues like this, it may be the safest practice to put all your
1094 % \label just after \caption rather than within \caption{}.
1096 % Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
1097 % option should be used if it is desired that the figures are to be
1098 % displayed while in draft mode.
1100 %\begin{figure}[!t]
1101 %\centering
1102 %\includegraphics[width=2.5in]{myfigure}
1103 % where an .eps filename suffix will be assumed under latex,
1104 % and a .pdf suffix will be assumed for pdflatex; or what has been declared
1105 % via \DeclareGraphicsExtensions.
1106 %\caption{Simulation Results}
1107 %\label{fig_sim}
1108 %\end{figure}
1110 % Note that IEEE typically puts floats only at the top, even when this
1111 % results in a large percentage of a column being occupied by floats.
1114 % An example of a double column floating figure using two subfigures.
1115 % (The subfig.sty package must be loaded for this to work.)
1116 % The subfigure \label commands are set within each subfloat command, the
1117 % \label for the overall figure must come after \caption.
1118 % \hfil must be used as a separator to get equal spacing.
1119 % The subfigure.sty package works much the same way, except \subfigure is
1120 % used instead of \subfloat.
1122 %\begin{figure*}[!t]
1123 %\centerline{\subfloat[Case I]\includegraphics[width=2.5in]{subfigcase1}%
1124 %\label{fig_first_case}}
1125 %\hfil
1126 %\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}%
1127 %\label{fig_second_case}}}
1128 %\caption{Simulation results}
1129 %\label{fig_sim}
1130 %\end{figure*}
1132 % Note that often IEEE papers with subfigures do not employ subfigure
1133 % captions (using the optional argument to \subfloat), but instead will
1134 % reference/describe all of them (a), (b), etc., within the main caption.
1137 % An example of a floating table. Note that, for IEEE style tables, the
1138 % \caption command should come BEFORE the table. Table text will default to
1139 % \footnotesize as IEEE normally uses this smaller font for tables.
1140 % The \label must come after \caption as always.
1142 %\begin{table}[!t]
1143 %% increase table row spacing, adjust to taste
1144 %\renewcommand{\arraystretch}{1.3}
1145 % if using array.sty, it might be a good idea to tweak the value of
1146 % \extrarowheight as needed to properly center the text within the cells
1147 %\caption{An Example of a Table}
1148 %\label{table_example}
1149 %\centering
1150 %% Some packages, such as MDW tools, offer better commands for making tables
1151 %% than the plain LaTeX2e tabular which is used here.
1152 %\begin{tabular}{|c||c|}
1153 %\hline
1154 %One & Two\\
1155 %\hline
1156 %Three & Four\\
1157 %\hline
1158 %\end{tabular}
1159 %\end{table}
1162 % Note that IEEE does not put floats in the very first column - or typically
1163 % anywhere on the first page for that matter. Also, in-text middle ("here")
1164 % positioning is not used. Most IEEE journals use top floats exclusively.
1165 % Note that, LaTeX2e, unlike IEEE journals, places footnotes above bottom
1166 % floats. This can be corrected via the \fnbelowfloat command of the
1167 % stfloats package.
1171 \section{Conclusion}
1172 The conclusion goes here.
1173 We have shown brm and proposed brm.
Since we are not aware of any previous research on this topic and we
are limited by space and time constraints, much research remains
to be done. There is plenty of room for further work in all parts
of our analysis --- different methods of generating the $\vec p$ vectors
can be explored, and other data mining methods could be tried.
It can be argued that many players adjust their style to the game conditions
(Go development era, handicap, komi and color, time limits, opponent)
or that styles might express differently in various game stages.
More professional players could be consulted on the findings
and for style scale calibration. The impact of handicap games on the by-strength
$\vec p$ distribution should also be investigated.

1187 TODO: Future research --- Sparse PCA
1192 % if have a single appendix:
1193 %\appendix[Proof of the Zonklar Equations]
1194 % or
1195 %\appendix % for no appendix heading
1196 % do not use \section anymore after \appendix, only \section*
1197 % is possibly needed
1199 % use appendices with more than one appendix
1200 % then use \section to start each appendix
1201 % you must declare a \section before using any
1202 % \subsection or using \label (\appendices by itself
1203 % starts a section numbered zero.)
1207 %\appendices
1208 %\section{Proof of the First Zonklar Equation}
1209 %Appendix one text goes here.
1211 %% you can choose not to have a title for an appendix
1212 %% if you want by leaving the argument blank
1213 %\section{}
1214 %Appendix two text goes here.
1217 % use section* for acknowledgement
1218 \section*{Acknowledgment}
1219 \label{acknowledgement}
1221 We would like to thank Radka ``chidori'' Hane\v{c}kov\'{a} for the original research idea
1222 and X for reviewing our paper.
We appreciate helpful comments on our general methodology
by John Fairbairn, T. M. Hall, Cyril H\"oschl, Robert Jasiek, Franti\v{s}ek Mr\'{a}z
and several GoDiscussions.com users \cite{GoDiscThread}.
1226 Finally, we are very grateful for detailed input on specific go styles
1227 by Alexander Dinerstein, Motoki Noguchi and V\'{i}t Brunner.
1230 % Can use something like this to put references on a page
1231 % by themselves when using endfloat and the captionsoff option.
1232 \ifCLASSOPTIONcaptionsoff
1233 \newpage
1238 % trigger a \newpage just before the given reference
1239 % number - used to balance the columns on the last page
1240 % adjust value as needed - may need to be readjusted if
1241 % the document is modified later
1242 %\IEEEtriggeratref{8}
1243 % The "triggered" command can be changed if desired:
1244 %\IEEEtriggercmd{\enlargethispage{-5in}}
1246 % references section
1248 % can use a bibliography generated by BibTeX as a .bbl file
1249 % BibTeX documentation can be easily obtained at:
1250 % http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
1251 % The IEEEtran BibTeX style support page is at:
1252 % http://www.michaelshell.org/tex/ieeetran/bibtex/
1253 \bibliographystyle{IEEEtran}
1254 % argument is your BibTeX string definitions and bibliography database(s)
1255 \bibliography{gostyle}
1257 % <OR> manually copy in the resultant .bbl file
1258 % set second argument of \begin to the number of references
1259 % (used to reserve space for the reference number labels box)
1260 %\begin{thebibliography}{1}
1262 %\bibitem{MasterMCTS}
1264 %\end{thebibliography}
1266 % biography section
1268 % If you have an EPS/PDF photo (graphicx package needed) extra braces are
1269 % needed around the contents of the optional argument to biography to prevent
1270 % the LaTeX parser from getting confused when it sees the complicated
1271 % \includegraphics command within an optional argument. (You could create
1272 % your own custom macro containing the \includegraphics command to make things
1273 % simpler here.)
1274 %\begin{biography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{mshell}}]{Michael Shell}
1275 % or if you just want to reserve a space for a photo:
1277 \begin{IEEEbiography}{Michael Shell}
1278 Biography text here.
1279 \end{IEEEbiography}
1281 % if you will not have a photo at all:
1282 \begin{IEEEbiographynophoto}{John Doe}
1283 Biography text here.
1284 \end{IEEEbiographynophoto}
1286 % insert where needed to balance the two columns on the last page with
1287 % biographies
1288 %\newpage
1290 \begin{IEEEbiographynophoto}{Jane Doe}
1291 Biography text here.
1292 \end{IEEEbiographynophoto}
1294 % You can push biographies down or up by placing
1295 % a \vfill before or after them. The appropriate
1296 % use of \vfill depends on what kind of text is
1297 % on the last page and whether or not the columns
1298 % are being equalized.
1300 %\vfill
1302 % Can be used to pull up biographies so that the bottom of the last one
1303 % is flush with the other column.
1304 %\enlargethispage{-5in}
1308 % that's all folks
1309 \end{document}