tex: PREPR1
[gostyle.git] / tex / gostyle.tex
blob62bce65ecae5b30adfb284b37414e70502c48612
1 \documentclass[journal]{IEEEtran}
3 \usepackage{cite}
4 % cite.sty was written by Donald Arseneau
5 % V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
6 % \cite{} output to follow that of IEEE. Loading the cite package will
7 % result in citation numbers being automatically sorted and properly
8 % "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
9 % cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
10 % \cite will automatically add leading space, if needed. Use cite.sty's
11 % noadjust option (cite.sty V3.8 and later) if you want to turn this off.
12 % cite.sty is already installed on most LaTeX systems. Be sure and use
13 % version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
14 % not currently provide for hyperlinked citations.
15 % The latest version can be obtained at:
16 % http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
17 % The documentation is contained in the cite.sty file itself.
20 % *** GRAPHICS RELATED PACKAGES ***
22 \ifCLASSINFOpdf
23 % \usepackage[pdftex]{graphicx}
24 % declare the path(s) where your graphic files are
25 % \graphicspath{{../pdf/}{../jpeg/}}
26 % and their extensions so you won't have to specify these with
27 % every instance of \includegraphics
28 % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
29 \else
30 % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
31 % will default to the driver specified in the system graphics.cfg if no
32 % driver is specified.
33 % \usepackage[dvips]{graphicx}
34 \usepackage{graphicx}
35 % declare the path(s) where your graphic files are
36 % \graphicspath{{../eps/}}
37 % and their extensions so you won't have to specify these with
38 % every instance of \includegraphics
39 % \DeclareGraphicsExtensions{.eps}
40 \fi
42 \usepackage{threeparttable}
44 \usepackage{psgo}
45 \setgounit{0.4cm}
47 \usepackage{algorithm}
48 \usepackage{algorithmic}
49 %\usepackage{algpseudocode}
50 % WICKED: neither of these seems to work???
51 % algorithmic.sty can be obtained at:
52 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
53 % There is also a support site at:
54 % http://algorithms.berlios.de/index.html
55 % Also of interest may be the (relatively newer and more customizable)
56 % algorithmicx.sty package by Szasz Janos:
57 % http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/
59 % *** ALIGNMENT PACKAGES ***
61 %\usepackage{array}
62 % http://www.ctan.org/tex-archive/macros/latex/required/tools/
65 \usepackage{amsmath}
66 %\usepackage{mdwtab}
67 % http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/
70 % IEEEtran contains the IEEEeqnarray family of commands that can be used to
71 % generate multiline equations as well as matrices, tables, etc., of high
72 % quality.
74 %\usepackage{eqparbox}
75 % Also of notable interest is Scott Pakin's eqparbox package for creating
76 % (automatically sized) equal width boxes - aka "natural width parboxes".
77 % Available at:
78 % http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/
82 % *** SUBFIGURE PACKAGES ***
83 %\usepackage[tight,footnotesize]{subfigure}
84 % subfigure.sty was written by Steven Douglas Cochran. This package makes it
85 % easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
86 % work, it is a good idea to load it with the tight package option to reduce
87 % the amount of white space around the subfigures. subfigure.sty is already
88 % installed on most LaTeX systems. The latest version and documentation can
89 % be obtained at:
90 % http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
91 % subfigure.sty has been superseded by subfig.sty.
95 %\usepackage[caption=false]{caption}
96 %\usepackage[font=footnotesize]{subfig}
97 % subfig.sty, also written by Steven Douglas Cochran, is the modern
98 % replacement for subfigure.sty. However, subfig.sty requires and
99 % automatically loads Axel Sommerfeldt's caption.sty which will override
100 % IEEEtran.cls handling of captions and this will result in nonIEEE style
101 % figure/table captions. To prevent this problem, be sure and preload
102 % caption.sty with its "caption=false" package option. This is will preserve
103 % IEEEtran.cls handing of captions. Version 1.3 (2005/06/28) and later
104 % (recommended due to many improvements over 1.2) of subfig.sty supports
105 % the caption=false option directly:
106 %\usepackage[caption=false,font=footnotesize]{subfig}
108 % The latest version and documentation can be obtained at:
109 % http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
110 % The latest version and documentation of caption.sty can be obtained at:
111 % http://www.ctan.org/tex-archive/macros/latex/contrib/caption/
115 % *** FLOAT PACKAGES ***
117 %\usepackage{fixltx2e}
118 % fixltx2e, the successor to the earlier fix2col.sty, was written by
119 % Frank Mittelbach and David Carlisle. This package corrects a few problems
120 % in the LaTeX2e kernel, the most notable of which is that in current
121 % LaTeX2e releases, the ordering of single and double column floats is not
122 % guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
123 % single column figure to be placed prior to an earlier double column
124 % figure. The latest version and documentation can be found at:
125 % http://www.ctan.org/tex-archive/macros/latex/base/
129 %\usepackage{stfloats}
130 % stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
131 % the ability to do double column floats at the bottom of the page as well
132 % as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
133 % LaTeX2e). It also provides a command:
134 %\fnbelowfloat
135 % to enable the placement of footnotes below bottom floats (the standard
136 % LaTeX2e kernel puts them above bottom floats). This is an invasive package
137 % which rewrites many portions of the LaTeX2e float routines. It may not work
138 % with other packages that modify the LaTeX2e float routines. The latest
139 % version and documentation can be obtained at:
140 % http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
141 % Documentation is contained in the stfloats.sty comments as well as in the
142 % presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
143 % does not allow \baselineskip to stretch. Authors submitting work to the
144 % IEEE should note that IEEE rarely uses double column equations and
145 % that authors should try to avoid such use. Do not be tempted to use the
146 % cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
147 % not format its papers in such ways.
150 %\ifCLASSOPTIONcaptionsoff
151 % \usepackage[nomarkers]{endfloat}
152 % \let\MYoriglatexcaption\caption
153 % \renewcommand{\caption}[2][\relax]{\MYoriglatexcaption[#2]{#2}}
154 %\fi
155 % endfloat.sty was written by James Darrell McCauley and Jeff Goldberg.
156 % This package may be useful when used in conjunction with IEEEtran.cls'
157 % captionsoff option. Some IEEE journals/societies require that submissions
158 % have lists of figures/tables at the end of the paper and that
159 % figures/tables without any captions are placed on a page by themselves at
160 % the end of the document. If needed, the draftcls IEEEtran class option or
161 % \CLASSINPUTbaselinestretch interface can be used to increase the line
162 % spacing as well. Be sure and use the nomarkers option of endfloat to
163 % prevent endfloat from "marking" where the figures would have been placed
164 % in the text. The two hack lines of code above are a slight modification of
165 % that suggested by in the endfloat docs (section 8.3.1) to ensure that
166 % the full captions always appear in the list of figures/tables - even if
167 % the user used the short optional argument of \caption[]{}.
168 % IEEE papers do not typically make use of \caption[]'s optional argument,
169 % so this should not be an issue. A similar trick can be used to disable
170 % captions of packages such as subfig.sty that lack options to turn off
171 % the subcaptions:
172 % For subfig.sty:
173 % \let\MYorigsubfloat\subfloat
174 % \renewcommand{\subfloat}[2][\relax]{\MYorigsubfloat[]{#2}}
175 % For subfigure.sty:
176 % \let\MYorigsubfigure\subfigure
177 % \renewcommand{\subfigure}[2][\relax]{\MYorigsubfigure[]{#2}}
178 % However, the above trick will not work if both optional arguments of
179 % the \subfloat/subfig command are used. Furthermore, there needs to be a
180 % description of each subfigure *somewhere* and endfloat does not add
181 % subfigure captions to its list of figures. Thus, the best approach is to
182 % avoid the use of subfigure captions (many IEEE journals avoid them anyway)
183 % and instead reference/explain all the subfigures within the main caption.
184 % The latest version of endfloat.sty and its documentation can obtained at:
185 % http://www.ctan.org/tex-archive/macros/latex/contrib/endfloat/
187 % The IEEEtran \ifCLASSOPTIONcaptionsoff conditional can also be used
188 % later in the document, say, to conditionally put the References on a
189 % page by themselves.
191 % *** PDF, URL AND HYPERLINK PACKAGES ***
193 \usepackage{url}
194 % url.sty was written by Donald Arseneau. It provides better support for
195 % handling and breaking URLs. url.sty is already installed on most LaTeX
196 % systems. The latest version can be obtained at:
197 % http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
198 % Read the url.sty source comments for usage information. Basically,
199 % \url{my_url_here}.
202 % *** Do not adjust lengths that control margins, column widths, etc. ***
203 % *** Do not use packages that alter fonts (such as pslatex). ***
204 % There should be no need to do such things with IEEEtran.cls V1.6 and later.
205 % (Unless specifically asked to do so by the journal or conference you plan
206 % to submit to, of course. )
208 % correct bad hyphenation here
209 \hyphenation{op-tical net-works semi-conduc-tor know-ledge}
212 \begin{document}
214 % paper title
215 % can use linebreaks \\ within to get better formatting as desired
216 \title{On Move Pattern Trends\\in Large Go Games Corpus}
218 % use \thanks{} to gain access to the first footnote area
219 % a separate \thanks must be used for each paragraph as LaTeX2e's \thanks
220 % was not built to handle multiple paragraphs
221 \author{Petr~Baudi\v{s},~Josef~Moud\v{r}\'{i}k% <-this % stops a space
222 \thanks{P. Baudi\v{s} is a student at the Faculty of Math and Physics, Charles University, Prague, CZ, and also does some of his Computer Go research as an employee of SUSE Labs Prague, Novell CZ.}% <-this % stops a space
223 \thanks{J. Moud\v{r}\'{i}k is a student at the Faculty of Math and Physics, Charles University, Prague, CZ.}}
225 % note the % following the last \IEEEmembership and also \thanks -
226 % these prevent an unwanted space from occurring between the last author name
227 % and the end of the author line. i.e., if you had this:
229 % \author{....lastname \thanks{...} \thanks{...} }
230 % ^------------^------------^----Do not want these spaces!
232 % a space would be appended to the last name and could cause every name on that
233 % line to be shifted left slightly. This is one of those "LaTeX things". For
234 % instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
235 % "AB" then you have to do: "\textbf{A}\textbf{B}"
236 % \thanks is no different in this regard, so shield the last } of each \thanks
237 % that ends a line with a % and do not let a space in before the next \thanks.
238 % Spaces after \IEEEmembership other than the last one are OK (and needed) as
239 % you are supposed to have spaces between the names. For what it is worth,
240 % this is a minor point as most people would not even notice if the said evil
241 % space somehow managed to creep in.
244 % The paper headers
245 \markboth{Transactions on Computational Intelligence and AI in Games --- PREPR1}%
246 {On Move Pattern Trends in Large Go Games Corpus --- PREPR1}
247 % The only time the second header will appear is for the odd numbered pages
248 % after the title page when using the twoside option.
250 % *** Note that you probably will NOT want to include the author's ***
251 % *** name in the headers of peer review papers. ***
252 % You can use \ifCLASSOPTIONpeerreview for conditional compilation here if
253 % you desire.
258 % If you want to put a publisher's ID mark on the page you can do it like
259 % this:
260 %\IEEEpubid{0000--0000/00\$00.00~\copyright~2007 IEEE}
261 % Remember, if you use this you must call \IEEEpubidadjcol in the second
262 % column for its text to clear the IEEEpubid mark.
266 % use for special paper notices
267 %\IEEEspecialpapernotice{(Invited Paper)}
272 % make the title area
273 \maketitle
276 \begin{abstract}
277 %\boldmath
279 We process a~large corpus of game records of the board game of Go and propose
280 a~way of extracting summary information on played moves. We then apply several
281 basic data-mining methods on the summary information to identify the most
282 differentiating features within the summary information, and discuss their
283 correspondence with traditional Go knowledge. We show statistically significant
284 mappings of the features to player attributes such as playing strength or
285 informally perceived ``playing style'' (e.g. territoriality or aggressivity),
286 describe accurate classifiers for these attributes, and propose applications
287 including seeding real-world ranks of internet players, aiding in Go study and
288 tuning of Go-playing programs, or contribution to Go-theoretical discussion on
289 the scope of ``playing style''.
291 \end{abstract}
292 % IEEEtran.cls defaults to using nonbold math in the Abstract.
293 % This preserves the distinction between vectors and scalars. However,
294 % if the journal you are submitting to favors bold math in the abstract,
295 % then you can use LaTeX's standard command \boldmath at the very start
296 % of the abstract to achieve this. Many IEEE journals frown on math
297 % in the abstract anyway.
299 % Note that keywords are not normally used for peerreview papers.
300 \begin{IEEEkeywords}
301 Board games, Evaluation, Function approximation, Go, Machine learning, Neural networks, User modelling
302 \end{IEEEkeywords}
309 % For peer review papers, you can put extra information on the cover
310 % page as needed:
311 % \ifCLASSOPTIONpeerreview
312 % \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
313 % \fi
315 % For peerreview papers, this IEEEtran command inserts a page break and
316 % creates the second title. It will be ignored for other modes.
317 \IEEEpeerreviewmaketitle
320 This work has been submitted to the IEEE for possible publication. Copyright
321 may be transferred without notice, after which this version may no longer be
322 accessible.
325 \section{Introduction}
326 % The very first letter is a 2 line initial drop letter followed
327 % by the rest of the first word in caps.
329 % form to use if the first word consists of a single letter:
330 % \IEEEPARstart{A}{demo} file is ....
332 % form to use if you need the single drop letter followed by
333 % normal text (unknown if ever used by IEEE):
334 % \IEEEPARstart{A}{}demo file is ....
336 % Some journals put the first two words in caps:
337 % \IEEEPARstart{T}{his demo} file is ....
339 % Here we have the typical use of a "T" for an initial drop letter
340 % and "HIS" in caps to complete the first word.
341 \IEEEPARstart{T}{he} field of Computer Go usually focuses on the problem
342 of creating a~program to play the game, finding the best move from a~given
343 board position. \cite{GellySilver2008}
344 We will make use of one method developed in the course
345 of such research and apply it to the analysis of existing game records
346 with the aim of helping humans to play and understand the game better
347 instead.
349 Go is a~two-player full-information board game played
350 on a~square grid (usually $19\times19$ lines) with black and white
351 stones; the goal of the game is to surround the most territory and
352 capture enemy stones. We assume basic familiarity with the game.
354 Many Go players are eager to play using computers (usually over
355 the internet) and review games played by others on computers as well.
356 This means that large amounts of game records are collected and digitally
357 stored, enabling easy processing of such collections. However, so far
358 only little has been done with the available data --- we are aware
359 only of uses for simple win/loss statistics \cite{KGSAnalytics} \cite{ProGoR}
360 and ``next move'' statistics on a~specific position \cite{Kombilo} \cite{MoyoGo}.
362 We present a~more in-depth approach --- from all played moves, we devise
363 a~compact evaluation of each player. We then explore correlations between
364 evaluations of various players in light of externally given information.
365 This way, we can discover similarity between move characteristics of
366 players with the same playing strength, or discuss the meaning of the
367 ``playing style'' concept on the assumption that similar playing styles
368 should yield similar move characteristics.
371 \section{Data Extraction}
372 \label{pattern-vectors}
374 As the input of our analysis, we use large collections of game records%
375 \footnote{We use the SGF format \cite{SGF} in our implementation.}
376 grouped by the primary object of analysis (player name, player rank, etc.).
377 We process the games by object, generating a description for each
378 played move -- a {\em pattern}, being a combination of several
379 {\em pattern features} described below.
381 We keep track of the most
382 occurring patterns, finally composing $n$-dimensional {\em pattern vector}
383 $\vec p$ of per-pattern counts from the $n$ globally most frequent patterns%
384 \footnote{We use $n=500$ in our analysis.}
385 (the mapping from patterns to vector elements is common for all objects).
386 We can then process and compare just the pattern vectors.
388 \subsection{Pattern Features}
389 When deciding how to compose the patterns we use to describe moves,
390 we need to consider a specificity tradeoff --- overly general descriptions carry too little
391 information to discern various player attributes; too specific descriptions
392 gather too few specimens over the games sample and the vector differences are
393 not statistically significant.
395 We have chosen an intuitive and simple approach inspired by pattern features
396 used when computing Elo ratings for candidate patterns in Computer Go play.
397 \cite{PatElo} Each pattern is a~combination of several {\em pattern features}
398 (name--value pairs) matched at the position of the played move.
399 We use these features:
401 \begin{itemize}
402 \item capture move flag
403 \item atari move flag
404 \item atari escape flag
405 \item contiguity-to-last flag%
406 \footnote{We do not consider contiguity features in some cases when we are working
407 on small game samples and need to reduce pattern diversity.}
408 --- whether the move has been played in one of 8 neighbors of the last move
409 \item contiguity-to-second-last flag
410 \item board edge distance --- only up to distance 4
411 \item spatial pattern --- configuration of stones around the played move
412 \end{itemize}
414 The spatial patterns are normalized (using a dictionary) to be always
415 black-to-play and maintain translational and rotational symmetry.
416 Configurations of radius between 2 and 9 in the gridcular metric%
417 \footnote{The {\em gridcular} metric
418 $d(x,y) = |\delta x| + |\delta y| + \max(|\delta x|, |\delta y|)$ produces
419 a circle-like structure on the Go board square grid. \cite{SpatPat} }
420 are matched.
422 Pattern vectors representing these features contain information on
423 played shape as well as a basic representation of tactical dynamics
424 --- threats to capture stones, replying to last move, or ignoring
425 opponent's move elsewhere to return to an urgent local situation.
426 The shapes most frequently correspond to opening moves
427 (either in empty corners and sides, or as part of {\em joseki}
428 --- commonly played sequences) characteristic for a certain
429 strategic aim. In the opening, even a single-line difference
430 in the distance from the border can have dramatic impact on
431 further local and global development.
433 \subsection{Vector Rescaling}
435 The pattern vector elements can have diverse values since for each object,
436 we consider a different number of games (and thus patterns).
437 Therefore, we normalize the values to range $[-1,1]$,
438 the most frequent pattern having the value of $1$ and the least occurring
439 one being $-1$.
440 Thus, we obtain vectors describing relative frequency of played patterns
441 independent of the number of gathered patterns.
442 But there are multiple ways to approach the normalization.
444 \begin{figure}[!t]
445 \centering
446 \includegraphics{patcountdist}
447 \caption{Log-scaled number of pattern occurrences
448 in the GoGoD games examined in sec. \ref{styleest}.}
449 \label{fig:patcountdist}
450 \end{figure}
452 \subsubsection{Linear Normalization}
454 One is simply to linearly re-scale the values using:
455 $$y_i = \frac{x_i - x_{\mathrm{min}}}{x_{\mathrm{max}}}$$
456 This is the default approach; we have used data processed by only this
457 computation unless we note otherwise.
458 As shown in Fig.~\ref{fig:patcountdist}, most of the spectrum is covered
459 by the few most-occurring patterns (describing mostly large-diameter
460 shapes from the game opening). This means that most patterns will be
461 always represented by only very small values near the lower bound.
463 \subsubsection{Extended Normalization}
464 \label{xnorm}
466 To alleviate this problem, we have also tried to modify the linear
467 normalization by applying two steps --- {\em pre-processing}
468 the raw counts using
469 $$x_i' = \log (x_i + 1)$$
470 and {\em post-processing} the re-scaled values by the logistic function:
471 $$y_i' = {2 \over 1 + e^{-cy_i}}-1$$
472 However, we have found that this method is not universally beneficial.
473 In our styles case study (sec. \ref{styleest}), this normalization
474 produced PCA decomposition with significant dimensions corresponding
475 better to some of the prior knowledge and more instructive for manual
476 inspection, but ultimately worsened accuracy of our classifiers;
477 we conjecture from this that the most frequently occurring patterns are
478 also most important for classification of major style aspects.
480 \subsection{Implementation}
482 We have implemented the data extraction by making use of the pattern
483 features matching implementation%
484 \footnote{The pattern features matcher was developed by one of the
485 authors according to the Elo-rating pattern selection scheme. \cite{PatElo}}
486 within the Pachi go-playing program \cite{Pachi}.
487 We extract information on players by converting the SGF game
488 records to GTP stream \cite{GTP} that feeds Pachi's {\tt patternscan}
489 engine, outputting a~single {\em patternspec} (string representation
490 of the particular pattern features combination) per move. Of course,
491 only moves played by the appropriate color in the game are collected.
493 \section{Data Mining}
494 \label{data-mining}
496 To assess the properties of gathered pattern vectors
497 and their influence on playing styles,
498 we process the data by several basic data-mining techniques.
500 The first two methods {\em (analytic)} rely purely on single data set
501 and serve to show internal structure and correlations within the data set.
503 Principal Component Analysis finds orthogonal vector components that
504 have the largest variance.
505 Reversing the process can indicate which patterns correlate with each component.
506 Additionally, PCA can be used as vector preprocessing for methods
507 that are negatively sensitive to pattern vector component correlations.
509 The~second method of Sociomaps creates spatial
510 representation of the data set elements (e.g. players) based on
511 similarity of their data set features; we can then project other
511 information on the map to illustrate its connection to the data set.
514 Furthermore, we test several \emph{classification} methods that assign
515 each pattern vector $\vec P$ an \emph{output vector} $\vec O$,
516 representing e.g.~information about styles, player's strength or even
517 meta-information like the player's era or a country of origin.
519 Initially, the methods must be calibrated (trained) on some prior knowledge,
520 usually in the form of \emph{reference pairs} of pattern vectors
521 and the associated output vectors.
522 The reference set is divided into training and testing pairs
523 and the methods can be compared by the mean square error (MSE) on testing data set
524 (difference of output vectors approximated by the method and their real desired value).
526 %\footnote{However, note that dicrete characteristics such as country of origin are
527 %not very feasible to use here, since WHAT??? is that even true?? }
529 The most trivial method is approximation by the PCA representation
530 matrix, provided that the PCA dimensions have some already well-defined
531 implementation; this can be true for single-dimensional information like
532 the playing strength.
534 Aside of that, we test the $k$-Nearest Neighbors \cite{CoverHart1967} classifier
535 that approximates $\vec O$ by composing the output vectors
536 of $k$ reference pattern vectors closest to $\vec P$.
538 Another classifier is a~multi-layer feed-forward Artificial Neural Network:
539 the neural network can learn correlations between input and output vectors
540 and generalize the ``knowledge'' to unknown vectors; it can be more flexible
541 in the interpretation of different pattern vector elements and discern more
542 complex relations than the $k$-NN classifier,
543 but may not be as stable and expects larger training sample.
545 Finally, a commonly used classifier in statistical inference is
546 the Naive Bayes classifier; it can infer relative probability of membership
547 in various classes based on previous evidence (training patterns). \cite{Bayes}
549 \subsection{Statistical Methods}
550 We use a couple of general statistical analyses together with the particular
551 techniques.
553 \label{pearson}
554 To find correlations within or between extracted data and
555 some prior knowledge (player rank, style vector), we compute the well-known
556 {\em Pearson product-moment correlation coefficient} \cite{Pearson},
557 measuring the strength of the linear dependence%
558 \footnote{A desirable property of PMCC is that it is invariant to translations and rescaling
559 of the vectors.}
560 between any two dimensions:
562 $$ r_{X,Y} = {{\rm cov}(X,Y) \over \sigma_X \sigma_Y} $$
564 To compare classifier performance on the reference data, we employ
565 {\em $k$-fold cross validation}:
566 we randomly divide the training set (organized by measured subjects, usually players)
567 into $k$ distinct segments of similar sizes and then iteratively
568 use each part as a~testing set as the other $k-1$ parts are used as a~training set.
569 We then average results over all iterations.
571 \subsection{Principal Component Analysis}
572 \label{PCA}
573 We use Principal Component Analysis \emph{PCA} \cite{Jolliffe1986}
574 to reduce the dimensions of the pattern vectors while preserving
575 as much information as possible, assuming inter-dependencies between
576 pattern vector dimensions are linear.
578 Briefly, PCA is an eigenvalue decomposition of a~covariance matrix of centered pattern vectors,
579 producing a~linear mapping $o$ from $n$-dimensional vector space
580 to a~reduced $m$-dimensional vector space.
581 The $m$ eigenvectors of the original vectors' covariance matrix
582 with the largest eigenvalues are used as the base of the reduced vector space;
583 the eigenvectors form projection matrix $W$.
585 For each original pattern vector $\vec p_i$,
586 we obtain its new representation $\vec r_i$ in the PCA base
587 as shown in the following equation:
588 \begin{equation}
589 \vec r_i = W \cdot \vec p_i
590 \end{equation}
592 The whole process is described in Algorithm~\ref{alg:pca}.
594 \begin{algorithm}
595 \caption{PCA -- Principal Component Analysis}
596 \begin{algorithmic}
597 \label{alg:pca}
598 \REQUIRE{$m > 0$, set of players $R$ with pattern vectors $p_r$}
599 \STATE $\vec \mu \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_r}$
600 \FOR{ $r \in R$}
601 \STATE $\vec p_r \leftarrow \vec p_r - \vec \mu$
602 \ENDFOR
603 \FOR{ $(i,j) \in \{1,... ,n\} \times \{1,... ,n\}$}
604 \STATE $\mathit{Cov}[i,j] \leftarrow 1/|R| \cdot \sum_{r \in R}{\vec p_{ri} \cdot \vec p_{rj}}$
605 \ENDFOR
606 \STATE Compute Eigenvalue Decomposition of $\mathit{Cov}$ matrix
607 \STATE Get $m$ largest eigenvalues
608 \STATE Most significant eigenvectors ordered by decreasing eigenvalues form the rows of matrix $W$
609 \FOR{ $r \in R$}
610 \STATE $\vec r_r\leftarrow W \vec p_r$
611 \ENDFOR
612 \end{algorithmic}
613 \end{algorithm}
615 \subsection{Sociomaps}
616 \label{soc}
617 Sociomaps are a general mechanism for visualising possibly asymmetric
618 relationships on a 2D plane such that ordering of the
619 subject distances in the dataset is preserved in distances on the plane.
621 In our particular case,%
622 \footnote{A special case of the {\em Subject-to-Object Relation Mapping (STORM)} indirect sociomap.}
623 we will consider a dataset $\vec S$ of small-dimensional
624 vectors $\vec s_i$. First, we estimate the significance of difference
625 between each two subjects.
626 Then, we determine projection $\varphi$ of all the $\vec s_i$
627 to spatial coordinates of a Euclidean plane, such that it reflects
628 the estimated difference significances.
630 To quantify the differences between the subjects ({\em team profiling} \cite{TeamProf})
631 into an $A$ matrix, for each two subjects $i, j$ we compute the scalar distance%
632 \footnote{We use the {\em Manhattan} metric $d(x,y) = \sum_i |x_i - y_i|$.}
633 of $s_i, s_j$ and then estimate the $A_{ij}$ probability of at least such distance
634 occurring in uniformly-distributed input. This probability expresses the significance
635 of the difference between the two subjects.
637 To visualize the quantified differences \cite{Sociomaps}, we need to find
638 the $\varphi$ projection such that it maximizes a {\em three-way ordering} criterion:
639 ordering of any three members within $A$ and on the plane
640 (by Euclidean metric) must be the same.
642 $$ \max_\varphi \sum_{i\ne j\ne k} \Phi(\varphi, i, j, k) $$
643 $$ \Phi(\varphi, i, j, k) = \begin{cases}
644 1 & \delta(1,A_{ij},A_{ik}) = \delta(\varphi(i),\varphi(j),\varphi(k)) \\
645 0 & \hbox{otherwise} \end{cases} $$
646 $$ \delta(a, b, c) = \begin{cases}
647 1 & |a-b| > |a-c| \\
648 0 & |a-b| = |a-c| \\
649 -1 & |a-b| < |a-c| \end{cases} $$
651 The $\varphi$ projection is then determined by randomly initializing
652 the position of each subject and then employing gradient descent methods.
654 \subsection{k-nearest Neighbors Classifier}
655 \label{knn}
656 Our goal is to approximate a player's output vector $\vec O$,
657 knowing their pattern vector $\vec P$.
658 We further assume that similarities in players' pattern vectors
659 uniformly correlate with similarities in players' output vectors.
661 We require a set of reference players $R$ with known \emph{pattern vectors} $\vec p_r$
662 and \emph{output vectors} $\vec o_r$.
664 $\vec O$ is approximated as a~weighted average of \emph{output vectors}
665 $\vec o_i$ of $k$ players with \emph{pattern vectors} $\vec p_i$ closest to $\vec P$.
666 This is illustrated in Algorithm~\ref{alg:knn}.
667 Note that the weight is a function of distance and is not explicitly defined in Algorithm~\ref{alg:knn}.
668 During our research, exponentially decreasing weight has proven to be sufficient.
670 \begin{algorithm}
671 \caption{k-Nearest Neighbors}
672 \begin{algorithmic}
673 \label{alg:knn}
674 \REQUIRE{pattern vector $\vec P$, $k > 0$, set of reference players $R$}
675 \FORALL{$r \in R$ }
676 \STATE $D[r] \leftarrow \mathit{EuclideanDistance}(\vec p_r, \vec P)$
677 \ENDFOR
678 \STATE $N \leftarrow \mathit{SelectSmallest}(k, R, D)$
679 \STATE $\vec O \leftarrow \vec 0$
680 \FORALL{$r \in N $}
681 \STATE $\vec O \leftarrow \vec O + \mathit{Weight}(D[r]) \cdot \vec o_r $
682 \ENDFOR
683 \end{algorithmic}
684 \end{algorithm}
686 \subsection{Neural Network Classifier}
687 \label{neural-net}
689 Feed-forward neural networks are known for their ability to generalize
690 and find correlations between input patterns and output classifications.
691 Before use, the network is iteratively trained on the training data
692 until the error on the training set is reasonably small.
694 %Neural network is an adaptive system that must undergo a training
695 %period similarly to the requirement
696 %of reference vectors for the k-Nearest Neighbors algorithm above.
698 \subsubsection{Computation and activation of the NN}
699 Technically, the neural network is a network of interconnected
700 computational units called neurons.
701 A feedforward neural network has a layered topology;
702 it usually has one \emph{input layer}, one \emph{output layer}
703 and an arbitrary number of \emph{hidden layers} between.
705 Each neuron $i$ is connected to all neurons in the previous layer and each connection has its weight $w_{ij}$.
707 The computation proceeds in discrete time steps.
708 In the first step, the neurons in the \emph{input layer}
709 are \emph{activated} according to the \emph{input vector}.
710 Then, we iteratively compute output of each neuron in the next layer
711 until the output layer is reached.
712 The activity of output layer is then presented as the result.
714 The activation $y_i$ of neuron $i$ from the layer $I$ is computed as
715 \begin{equation}
716 y_i = f\left(\sum_{j \in J}{w_{ij} y_j}\right)
717 \end{equation}
718 where $J$ is the previous layer, while $y_j$ is the activation for neurons from $J$ layer.
719 Function $f()$ is a~so-called \emph{activation function}
720 and its purpose is to bound the outputs of neurons.
721 A typical example of an activation function is the sigmoid function.%
722 \footnote{A special case of the logistic function $\sigma(x)=(1+e^{-(rx+k)})^{-1}$.
723 Parameters control the growth rate $r$ and the x-position $k$.}
725 \subsubsection{Training}
726 Training of the feed-forward neural network usually involves some
727 modification of the supervised Backpropagation learning algorithm.
728 We use a first-order optimization algorithm called RPROP~\cite{Riedmiller1993}.
730 %Because the \emph{reference set} is usually not very large,
731 %we have devised a simple method for its extension.
732 %This enhancement is based upon adding random linear combinations
733 %of \emph{style and pattern vectors} to the training set.
735 As outlined above, the training set $T$ consists of
736 $(\vec p_i, \vec o_i)$ pairs.
737 The training algorithm is shown in Algorithm \ref{alg:tnn}.
739 \begin{algorithm}
740 \caption{Training Neural Network}
741 \begin{algorithmic}
742 \label{alg:tnn}
743 \REQUIRE{Train set $T$, desired error $e$, max iterations $M$}
744 \STATE $N \leftarrow \mathit{RandomlyInitializedNetwork}()$
745 \STATE $\mathit{It} \leftarrow 0$
746 \REPEAT
747 \STATE $\mathit{It} \leftarrow \mathit{It} + 1$
748 \STATE $\Delta \vec w \leftarrow \vec 0$
749 \STATE $\mathit{TotalError} \leftarrow 0$
750 %\FORALL{$(\overrightarrow{Input}, \overrightarrow{DesiredOutput}) \in T$}
751 %\STATE $\overrightarrow{Output} \leftarrow Result(N, \overrightarrow{Input})$
752 %\STATE $E \leftarrow |\overrightarrow{DesiredOutput} - \overrightarrow{Output}|$
753 \FORALL{$(\mathit{Input}, \mathit{DesiredOutput}) \in T$}
754 \STATE $\mathit{Output} \leftarrow \mathit{Result}(N, \mathit{Input})$
755 \STATE $\mathit{Error} \leftarrow |\mathit{DesiredOutput} - \mathit{Output}|$
756 \STATE $\Delta \vec w \leftarrow \Delta \vec w + \mathit{WeightUpdate}(N,\mathit{Error})$
757 \STATE $\mathit{TotalError} \leftarrow \mathit{TotalError} + \mathit{Error}$
758 \ENDFOR
759 \STATE $N \leftarrow \mathit{ModifyWeights}(N, \Delta \vec w)$
760 \UNTIL{$\mathit{TotalError} < e$ or $ \mathit{It} > M$}
761 \end{algorithmic}
762 \end{algorithm}
764 \subsection{Naive Bayes Classifier}
765 \label{naive-bayes}
767 The Naive Bayes Classifier uses existing information to construct
768 a probability model of likelihoods of given {\em feature variables}
769 based on a discrete-valued {\em class variable}.
770 Using the Bayes equation, we can then estimate the probability distribution
771 of class variable for particular values of the feature variables.
773 In order to approximate the player's output vector $\vec O$ based on
774 pattern vector $\vec P$, we will compute each element of the
775 output vector separately, covering the output domain by several $k$-sized
776 discrete intervals (classes).
778 We will also in fact work on
779 PCA-represented input $\vec R$ (using the 10 most significant
780 dimensions), since smaller input dimension is more computationally
781 feasible and $\vec R$ also better fits the pre-requisites of the
782 classifier, the dimensions being more independent and
783 better approximating the normal distribution.
785 When training the classifier for $\vec O$ element $o_i$
786 of class $c = \lfloor o_i/k \rfloor$,
787 we assume the $\vec R$ elements are normally distributed and
788 feed the classifier information in the form
789 $$ \vec R \mid c $$
790 estimating the mean $\mu_c$ and standard deviation $\sigma_c$
791 of each $\vec R$ element for each encountered $c$
792 (see algorithm \ref{alg:tnb}).
794 Then, we can query the built probability model on
795 $$ \max_c P(c \mid \vec R) $$
796 obtaining the most probable class $c$ for an arbitrary $\vec R$.
797 Each probability is obtained using the normal distribution formula:
798 $$ P(c \mid x) = {1\over \sqrt{2\pi\sigma_c^2}}\exp\left(-{(x-\mu_c)^2\over2\sigma_c^2}\right) $$
800 \begin{algorithm}
801 \caption{Training Naive Bayes}
802 \begin{algorithmic}
803 \label{alg:tnb}
804 \REQUIRE{Train set $T = (\mathit{R, c})$}
805 \FORALL{$(R, c) \in T$}
806 \STATE $\mathit{RbyC}_c \leftarrow \{\mathit{RbyC}_c, R\}$
807 \ENDFOR
808 \FORALL{$c$}
809 \STATE $\mu_c \leftarrow {1 \over |\mathit{RbyC}_c|} \sum_{R \in \mathit{RbyC}_c} R$
810 \ENDFOR
811 \FORALL{$c$}
812 \STATE $\sigma_c \leftarrow \sqrt{{1 \over |\mathit{RbyC}_c|} \sum_{R \in \mathit{RbyC}_c} (R-\mu_c)^2} $
813 \ENDFOR
814 \end{algorithmic}
815 \end{algorithm}
817 \subsection{Implementation}
819 We have implemented the data mining methods as the
820 ``gostyle'' open-source framework \cite{GoStyle},
821 made available under the GNU GPL licence.
823 The majority of our basic processing and the analysis parts
824 are implemented in the Python \cite{Python25} programming language.
825 We use several external libraries, most notably the MDP library \cite{MDP}.
826 The neural network part of the project is written using the libfann C library~\cite{Nissen2003}.
827 The Naive Bayes Classifier uses the {\tt AI::NaiveBayes1} Perl module~\cite{NaiveBayes1}.
829 The sociomap has been visualised using the Team Profile Analyzer \cite{TPA}
830 which is part of the Sociomap suite \cite{SociomapSite}.
833 \section{Strength Estimation}
835 \begin{figure*}[!t]
836 \centering
837 \includegraphics[width=7in]{strength-pca}
838 \caption{PCA of by-strength vectors}
839 \label{fig:strength_pca}
840 \end{figure*}
842 First, we have used our framework to analyse correlations of pattern vectors
843 and playing strength. As in other competitively played board games, Go players
844 receive a real-world {\em rating number} based on tournament games,
845 and a {\em rank} based on their rating.%
846 \footnote{Elo-type rating system \cite{GoR} is usually used,
847 corresponding to even win chances for game of two players with the same rank,
848 and about 2:3 win chance for the stronger in case of one rank difference.}
849 \footnote{Professional ranks and dan ranks in some Asian countries may
850 be assigned differently.}
851 The amateur ranks range from 30-kyu (beginner) to 1-kyu (intermediate),
852 followed by 1-dan to 9-dan\footnote{7-dan in some systems.} (top-level player).
853 Multiple independent real-world ranking scales exist
854 (geographically based), also online servers maintain their own user ranking;
855 the difference between scales can be up to several ranks and the rank
856 distributions also differ~\cite{RankComparison}.
858 \subsection{Data used}
859 As the source game collection, we use Go Teaching Ladder reviews archive%
860 \footnote{The reviews contain comments and variations --- we consider only the main
861 variation with the actual played game.}
862 \cite{GTL} --- this collection contains 7700 games of players with strength ranging
863 from 30-kyu to 4-dan; we consider only even games with clear rank information.
864 Since the rank information is provided by the users and may not be consistent,
865 we are forced to take a simplified look at the ranks,
866 discarding the differences between various systems and thus somewhat
867 increasing error in our model.\footnote{Since our results seem satisfying,
868 we did not pursue trying another collection;
869 one could e.g. look at game archives of some Go server to work within
870 single more-or-less consistent rank model.}
872 To represent the rank in our dataset, we have rescaled it to $[-3,30]$ with positive
873 numbers representing the kyu ranks and numbers smaller than 1 representing the dan
874 ranks: 4-dan maps to $-3$, 1-dan to $0$, etc.
876 \subsection{PCA analysis}
877 First, we have created a single pattern vector for each rank between 30-kyu to 4-dan;
878 we have performed PCA analysis on the pattern vectors, achieving near-perfect
879 rank correspondence in the first PCA dimension%
880 \footnote{The eigenvalue of the second dimension was four times smaller,
881 with no discernible structure revealed within the lower-order eigenvectors.}
882 (figure \ref{fig:strength_pca}).
884 We measure the accuracy of strength approximation by the first dimension
885 using Pearson's $r$ (see \ref{pearson}), yielding a very satisfying value of $r=0.979$
886 implying extremely strong correlation.
887 \footnote{Extended vector normalization (sec. \ref{xnorm})
888 produced noticeably less clear-cut results.}
890 \subsection{Strength classification}
892 We have trained the tested classifiers using one pattern vector
893 per rank, then performing many-fold validation by repeatedly and
894 exhaustively taking disjunct $k$-game samples of the same rank from the collection%
895 \footnote{Arbitrary game numbers are approximated by pattern file sizes,
896 iteratively selecting all games of randomly selected player
897 of the required strength.}
898 and measuring the standard error of the classifier.
900 When assessing the strength classifiers,
901 we have explored the influence of different game sample sizes ($k$)
902 on the classification accuracy to hint on practicality and scaling
903 abilities of the classifiers.
904 In order to reduce the diversity of patterns (negatively impacting accuracy
905 on small samples), we do not consider the contiguity pattern features.
907 %We have randomly separated $10\%$ of the game database as a testing set,
908 %Using the most %of players within the test group yields MSE TODO, thus providing
909 %reasonably satisfying accuracy by itself.
911 %Using the Naive Bayes classifier yields MSE TODO.
913 Using the $4$-Nearest Neighbors classifier with the weight function
914 \begin{equation}
915 \mathit{Weight}(\vec x) = 0.9^{M*\mathit{Distance}(\vec x)}
916 \end{equation}
917 (parameter $M$ ranging from $30$ to $6$),
918 we have achieved the results described in Table~\ref{table-str-class}
919 --- overall, obtaining reasonable accuracy even on as few as 5 games as a sample.
920 The error on the rank scale is listed as mean square error (MSE)
921 and standard deviation $\sigma$ (the difference from the real rank on average).
923 For comparison purposes, the table also includes a PCA classifier
924 (the most significant PCA eigenvector position is directly taken as a~rank)
925 and a~random classifier.
927 \begin{table}[!t]
928 % increase table row spacing, adjust to taste
929 \renewcommand{\arraystretch}{1.3}
930 \caption{Strength Classifier Performance}
931 \label{table-str-class}
932 \centering
933 \begin{tabular}{|c|c||c|c||c|}
934 \hline
935 Method & $\sim$ games & MSE & $\sigma$ & Cmp \\ \hline
936 $k$-NN&$85$ & $5.514$ & $2.348$ & $6.150$ \\
937 &$43$ & $8.449$ & $2.907$ & $4.968$ \\
938 &$17$ & $10.096$& $3.177$ & $4.545$ \\
939 &$9$ & $21.343$& $4.620$ & $3.126$ \\
940 &$2$ & $52.212$& $7.226$ & $1.998$ \\\hline
942 PCA & $85$ & $24.070$ & $4.906$ & $2.944$ \\
943 &$43$ & $31.324$ & $5.597$ & $2.580$ \\
944 &$17$ & $50.390$ & $7.099$ & $2.034$ \\
945 &$9$ & $72.528$ & $8.516$ & $1.696$ \\
946 &$2$ & $128.660$& $11.343$ & $1.273$ \\ \hline
948 Rnd & N/A & $208.549$ & $14.441$ & $1.000$ \\ \hline
949 \end{tabular}
950 \end{table}
952 %#Finally, we used $8$-fold cross validation on one-file-per-rank data,
953 %yielding a MSE $0.085$ which is equivalent to standard deviation of $15\%$.
955 \section{Style Estimation}
956 \label{styleest}
958 As a~second case study for our pattern analysis,
959 we investigate pattern vectors $\vec p$ of various well-known players,
960 their relationships in-between and to prior knowledge
961 in order to explore the correlation of prior knowledge with extracted patterns.
962 We look for relationships between pattern vectors and perceived
963 ``playing style'' and attempt to use our classifiers to transform
964 pattern vector $\vec p$ to style vector $\vec s$.
966 The source game collection is GoGoD Winter 2008 \cite{GoGoD} containing 55000
967 professional games, dating from the early Go history 1500 years ago to the present.
968 We consider only games of a small subset of players (table \ref{fig:style_marks});
969 we have chosen them for being well-known within the players community,
970 having large number of played games in our collection and not playing too long
971 ago.\footnote{Over time, many commonly used sequences get altered, adopted and
972 dismissed; usual playing conditions can also differ significantly.}
974 \subsection{Expert-based knowledge}
975 \label{style-vectors}
976 In order to provide a reference frame for our style analysis,
977 we have gathered some information from game experts about various
978 traditionally perceived style aspects to use as a prior knowledge.
979 This expert-based knowledge allows us to predict styles of unknown players
980 based on the similarity of their pattern vectors,
981 as well as discover correlations between styles and proportions
982 of played patterns.
984 Experts were asked to mark four style aspects of each of the given players
985 on the scale from 1 to 10. The style aspects are defined as shown:
987 \vspace{4mm}
988 \noindent
989 %\begin{table}
990 \begin{center}
991 %\caption{Styles}
992 \begin{tabular}{|c|c|c|}
993 \hline
994 Style & 1 & 10\\ \hline
995 Territoriality $\tau$ & Moyo & Territory \\
996 Orthodoxity $\omega$ & Classic & Novel \\
997 Aggressivity $\alpha$ & Calm & Fighting \\
998 Thickness $\theta$ & Safe & Shinogi \\ \hline
999 \end{tabular}
1000 \end{center}
1001 %\end{table}
1002 \vspace{4mm}
1004 We have devised these four style aspects based on our own Go experience
1005 and consultations with other experts.
1006 The used terminology has quite
1007 clear meaning to any experienced Go player and there is not too much
1008 room for confusion, except possibly in the case of ``thickness'' ---
1009 but the concept is not easy to pin-point succinctly and we also did not
1010 add extra comments on the style aspects to the questionnaire deliberately
1011 to accurately reflect any diversity in understanding of the terms.
1013 Averaging this expert based evaluation yields \emph{reference style vector}
1014 $\vec s_r$ (of dimension $4$) for each player $r$
1015 from the set of \emph{reference players} $R$.
1017 Throughout our research, we have experimentally found that playing era
1018 is also a major factor differentiating between patterns. Thus, we have
1019 further extended the $\vec s_r$ by median year over all games played
1020 by the player.
1022 \begin{table}[!t]
1023 % increase table row spacing, adjust to taste
1024 \renewcommand{\arraystretch}{1.3}
1025 \caption{Covariance Measure of Prior Information Dimensions}
1026 \label{fig:style_marks_r}
1027 \centering
1028 % Some packages, such as MDW tools, offer better commands for making tables
1029 % than the plain LaTeX2e tabular which is used here.
1030 \begin{tabular}{|r||r||r||r||r||r|}
1031 \hline
1032 & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & year \\
1033 \hline
1034 $\tau$ &$1.000$&$\mathbf{-0.438}$&$\mathbf{-0.581}$&$\mathbf{ 0.721}$&$ 0.108$\\
1035 $\omega$& &$ 1.000$&$\mathbf{ 0.682}$&$ 0.014$&$-0.021$\\
1036 $\alpha$& & &$ 1.000$&$-0.081$&$ 0.030$\\
1037 $\theta$& &\multicolumn{1}{c||}{---}
1038 & &$ 1.000$&$-0.073$\\
1039 y. & & & & &$ 1.000$\\
1040 \hline
1041 \end{tabular}
1042 \end{table}
1044 Three high-level Go players (Alexander Dinerstein 3-pro, Motoki Noguchi
1045 7-dan and V\'{i}t Brunner 4-dan) have judged the style of the reference
1046 players.
1047 The complete list of answers is in table \ref{fig:style_marks}.
1048 Standard error of the answers is 0.952, making the data reasonably reliable,
1049 though a much larger sample would of course be more desirable
1050 (but beyond our means to collect).
1051 We have also found significant correlation between the various
1052 style aspects, as shown by the Pearson's $r$ values
1053 in table \ref{fig:style_marks_r}.
1055 \begin{table}[!t]
1056 % increase table row spacing, adjust to taste
1057 \renewcommand{\arraystretch}{1.4}
1058 \begin{threeparttable}
1059 \caption{Expert-Based Style Aspects of Selected Professionals\tnote{1} \tnote{2}}
1060 \label{fig:style_marks}
1061 \centering
1062 % Some packages, such as MDW tools, offer better commands for making tables
1063 % than the plain LaTeX2e tabular which is used here.
1064 \begin{tabular}{|c||c||c||c||c|}
1065 \hline
1066 {Player} & $\tau$ & $\omega$ & $\alpha$ & $\theta$ \\
1067 \hline
1068 Go Seigen\tnote{3} & $6.0 \pm 2.0$ & $9.0 \pm 1.0$ & $8.0 \pm 1.0$ & $5.0 \pm 1.0$ \\
1069 Ishida Yoshio\tnote{4}&$8.0 \pm 1.4$ & $5.0 \pm 1.4$ & $3.3 \pm 1.2$ & $5.3 \pm 0.5$ \\
1070 Miyazawa Goro & $1.5 \pm 0.5$ & $10 \pm 0 $ & $9.5 \pm 0.5$ & $4.0 \pm 1.0$ \\
1071 Yi Ch'ang-ho\tnote{5}& $7.0 \pm 0.8$ & $5.0 \pm 1.4$ & $2.6 \pm 0.9$ & $2.6 \pm 1.2$ \\
1072 Sakata Eio & $7.6 \pm 1.7$ & $4.6 \pm 0.5$ & $7.3 \pm 0.9$ & $8.0 \pm 1.6$ \\
1073 Fujisawa Hideyuki & $3.5 \pm 0.5$ & $9.0 \pm 1.0$ & $7.0 \pm 0.0$ & $4.0 \pm 0.0$ \\
1074 Otake Hideo & $4.3 \pm 0.5$ & $3.0 \pm 0.0$ & $4.6 \pm 1.2$ & $3.6 \pm 0.9$ \\
1075 Kato Masao & $2.5 \pm 0.5$ & $4.5 \pm 1.5$ & $9.5 \pm 0.5$ & $4.0 \pm 0.0$ \\
1076 Takemiya Masaki\tnote{4}&$1.3\pm 0.5$& $6.3 \pm 2.1$ & $7.0 \pm 0.8$ & $1.3 \pm 0.5$ \\
1077 Kobayashi Koichi & $9.0 \pm 1.0$ & $2.5 \pm 0.5$ & $2.5 \pm 0.5$ & $5.5 \pm 0.5$ \\
1078 Cho Chikun & $9.0 \pm 0.8$ & $7.6 \pm 0.9$ & $6.6 \pm 1.2$ & $9.0 \pm 0.8$ \\
1079 Ma Xiaochun & $8.0 \pm 2.2$ & $6.3 \pm 0.5$ & $5.6 \pm 1.9$ & $8.0 \pm 0.8$ \\
1080 Yoda Norimoto & $6.3 \pm 1.7$ & $4.3 \pm 2.1$ & $4.3 \pm 2.1$ & $3.3 \pm 1.2$ \\
1081 Luo Xihe & $7.3 \pm 0.9$ & $7.3 \pm 2.5$ & $7.6 \pm 0.9$ & $6.0 \pm 1.4$ \\
1082 O Meien & $2.6 \pm 1.2$ & $9.6 \pm 0.5$ & $8.3 \pm 1.7$ & $3.6 \pm 1.2$ \\
1083 Rui Naiwei & $4.6 \pm 1.2$ & $5.6 \pm 0.5$ & $9.0 \pm 0.8$ & $3.3 \pm 1.2$ \\
1084 Yuki Satoshi & $3.0 \pm 1.0$ & $8.5 \pm 0.5$ & $9.0 \pm 1.0$ & $4.5 \pm 0.5$ \\
1085 Hane Naoki & $7.5 \pm 0.5$ & $2.5 \pm 0.5$ & $4.0 \pm 0.0$ & $4.5 \pm 1.5$ \\
1086 Takao Shinji & $5.0 \pm 1.0$ & $3.5 \pm 0.5$ & $5.5 \pm 1.5$ & $4.5 \pm 0.5$ \\
1087 Yi Se-tol & $5.3 \pm 0.5$ & $6.6 \pm 2.5$ & $9.3 \pm 0.5$ & $6.6 \pm 1.2$ \\
1088 Yamashita Keigo\tnote{4}&$2.0\pm 0.0$& $9.0 \pm 1.0$ & $9.5 \pm 0.5$ & $3.0 \pm 1.0$ \\
1089 Cho U & $7.3 \pm 2.4$ & $6.0 \pm 0.8$ & $5.3 \pm 1.7$ & $6.3 \pm 1.7$ \\
1090 Gu Li & $5.6 \pm 0.9$ & $7.0 \pm 0.8$ & $9.0 \pm 0.8$ & $4.0 \pm 0.8$ \\
1091 Chen Yaoye & $6.0 \pm 1.0$ & $4.0 \pm 1.0$ & $6.0 \pm 1.0$ & $5.5 \pm 0.5$ \\
1092 \hline
1093 \end{tabular}
1094 \begin{tablenotes}
1095 \item [1] Including standard deviation. Only players where we received at least two out of three answers are included.
1096 \item [2] Since the playing era column does not fit into the table, we at least sort the players ascending by their median year.
1097 \item [3] We do not consider games of Go Seigen due to him playing across several distinct eras and also being famous for radical opening experiments throughout the time, and thus featuring especially high diversity in patterns.
1098 \item [4] We do not consider games of Ishida Yoshio and Yamashita Keigo for the PCA analysis since they are significant outliers, making high-order dimensions much like purely ``similarity to this player''. Takemiya Masaki has the similar effect for the first dimension, but that case corresponds to common knowledge of him being an extreme proponent of anti-territorial (``moyo'') style.
1099 \item [5] We consider games only up to year 2004, since Yi Ch'ang-ho was prominent representative of a balanced, careful player until then and still has this reputation in minds of many players, but is regarded to have altered his style significantly afterwards.
1100 \end{tablenotes}
1101 \end{threeparttable}
1102 \end{table}
1104 \subsection{Style Components Analysis}
1106 \begin{figure}[!t]
1107 \centering
1108 \includegraphics[width=3in]{style-pca}
1109 \caption{PCA of per-player vectors}
1110 \label{fig:style_pca}
1111 \end{figure}
1113 We have looked at the ten most significant dimensions of the pattern data
1114 yielded by the PCA analysis of the reference player set%
1115 \footnote{We also tried to observe PCA effect of removing outlying Takemiya
1116 Masaki. That way, the second dimension strongly
1117 correlated to territoriality and third dimension strongly correlated to era,
1118 however the first dimension remained mysteriously uncorrelated and with no
1119 obvious interpretation.}
1120 (fig. \ref{fig:style_pca} shows the first three).
1121 We have again computed the Pearson's $r$ for all combinations of PCA dimensions
1122 and dimensions of the prior knowledge style vectors to find correlations.
1124 \begin{table}[!t]
1125 % increase table row spacing, adjust to taste
1126 \renewcommand{\arraystretch}{1.4}
1127 \caption{Covariance Measure of PCA and Prior Information}
1128 \label{fig:style_r}
1129 \centering
1130 % Some packages, such as MDW tools, offer better commands for making tables
1131 % than the plain LaTeX2e tabular which is used here.
1132 \begin{tabular}{|c||r||r||r||r||r|}
1133 \hline
1134 Eigenval. & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & Year \\
1135 \hline
1136 $0.4473697$ & $\mathbf{-0.530}$ & $ 0.323$ & $ 0.298$ & $\mathbf{-0.554}$ & $ 0.090$ \\
1137 $0.1941057$ & $\mathbf{-0.547}$ & $ 0.215$ & $ 0.249$ & $-0.293$ & $\mathbf{-0.630}$ \\
1138 $0.0463189$ & $ 0.131$ & $-0.002$ & $-0.128$ & $ 0.242$ & $\mathbf{-0.630}$ \\
1139 $0.0280301$ & $-0.011$ & $ 0.225$ & $ 0.186$ & $ 0.131$ & $ 0.067$ \\
1140 $0.0243231$ & $-0.181$ & $ 0.174$ & $-0.032$ & $-0.216$ & $ 0.352$ \\
1141 $0.0180875$ & $-0.364$ & $ 0.226$ & $ 0.339$ & $-0.136$ & $ 0.113$ \\
1142 $0.0138478$ & $-0.194$ & $-0.048$ & $-0.099$ & $-0.333$ & $ 0.055$ \\
1143 $0.0110575$ & $-0.040$ & $-0.254$ & $-0.154$ & $-0.054$ & $-0.089$ \\
1144 $0.0093587$ & $-0.199$ & $-0.115$ & $ 0.358$ & $-0.234$ & $-0.028$ \\
1145 $0.0084930$ & $ 0.046$ & $ 0.190$ & $ 0.305$ & $ 0.176$ & $ 0.089$ \\
1146 \hline
1147 \end{tabular}
1148 \end{table}
1150 \begin{table}[!t]
1151 % increase table row spacing, adjust to taste
1152 \renewcommand{\arraystretch}{1.6}
1153 \begin{threeparttable}
1154 \caption{Characteristic Patterns of PCA$_{1,2}$ Dimensions \tnote{1}}
1155 \label{fig:style_patterns}
1156 \centering
1157 % Some packages, such as MDW tools, offer better commands for making tables
1158 % than the plain LaTeX2e tabular which is used here.
1159 \begin{tabular}{|p{2.3cm}p{2.4cm}p{2.4cm}p{0cm}|}
1160 % The virtual last column is here because otherwise we get random syntax errors.
1162 \hline \multicolumn{4}{|c|}{PCA$_1$ --- Moyo-oriented, thin-playing player} \\
1163 \centering \begin{psgopartialboard*}{(8,1)(12,6)}
1164 \stone[\marktr]{black}{k}{4}
1165 \end{psgopartialboard*} &
1166 \centering \begin{psgopartialboard*}{(1,2)(5,6)}
1167 \stone{white}{d}{3}
1168 \stone[\marktr]{black}{d}{5}
1169 \end{psgopartialboard*} &
1170 \centering \begin{psgopartialboard*}{(5,1)(10,6)}
1171 \stone{white}{f}{3}
1172 \stone[\marktr]{black}{j}{4}
1173 \end{psgopartialboard*} & \\
1174 \centering $0.274$ & \centering $0.086$ & \centering $0.083$ & \\
1175 \centering high corner/side opening \tnote{2} & \centering high corner approach & \centering high distant pincer & \\
1177 \hline \multicolumn{4}{|c|}{PCA$_1$ --- Territorial, thick-playing player} \\
1178 \centering \begin{psgopartialboard*}{(3,1)(7,6)}
1179 \stone{white}{d}{4}
1180 \stone[\marktr]{black}{f}{3}
1181 \end{psgopartialboard*} &
1182 \centering \begin{psgopartialboard*}{(3,1)(7,6)}
1183 \stone{white}{c}{6}
1184 \stone{black}{d}{4}
1185 \stone[\marktr]{black}{f}{3}
1186 \end{psgopartialboard*} &
1187 \centering \begin{psgopartialboard*}{(3,1)(7,6)}
1188 \stone{black}{d}{4}
1189 \stone[\marktr]{black}{f}{3}
1190 \end{psgopartialboard*} & \\
1191 \centering $-0.399$ & \centering $-0.399$ & \centering $-0.177$ & \\
1192 \centering low corner approach & \centering low corner reply & \centering low corner enclosure & \\
1194 \hline \multicolumn{4}{|c|}{PCA$_2$ --- Territorial, current player \tnote{3}} \\
1195 \centering \begin{psgopartialboard*}{(3,1)(7,6)}
1196 \stone{white}{c}{6}
1197 \stone{black}{d}{4}
1198 \stone[\marktr]{black}{f}{3}
1199 \end{psgopartialboard*} &
1200 \centering \begin{psgopartialboard*}{(3,1)(8,6)}
1201 \stone{white}{d}{4}
1202 \stone[\marktr]{black}{g}{4}
1203 \end{psgopartialboard*} &
1204 \centering \begin{psgopartialboard*}{(4,1)(9,6)}
1205 \stone{black}{d}{4}
1206 \stone{white}{f}{3}
1207 \stone[\marktr]{black}{h}{3}
1208 \end{psgopartialboard*} & \\
1209 \centering $-0.193$ & \centering $-0.139$ & \centering $-0.135$ & \\
1210 \centering low corner reply \tnote{4} & \centering high distant approach/pincer & \centering near low pincer & \\
1212 \hline
1213 \end{tabular}
1214 \begin{tablenotes}
1215 \item [1] We present the patterns in a simplified compact form;
1216 in reality, they are usually somewhat larger and always circle-shaped
1217 (centered on the triangled move).
1218 We omit only pattern segments that are entirely empty.
1219 \item [2] We give some textual interpretation of the patterns, especially
1220 since some of them may not be obvious unless seen in game context; we choose
1221 the descriptions based on the most frequently observed contexts, but of course
1222 the pattern can be also matched in other positions and situations.
1223 \item [3] In the second PCA dimension, we find no correlated patterns;
1224 only uncorrelated and anti-correlated ones.
1225 \item [4] As the second most significant pattern,
1226 we skip a slide follow-up pattern to this move.
1227 \end{tablenotes}
1228 \end{threeparttable}
1229 \end{table}
1231 \begin{table}[!t]
1232 % increase table row spacing, adjust to taste
1233 \renewcommand{\arraystretch}{1.8}
1234 \begin{threeparttable}
1235 \caption{Characteristic Patterns of PCA$_3$ Dimension \tnote{1}}
1236 \label{fig:style_patterns3}
1237 \centering
1238 % Some packages, such as MDW tools, offer better commands for making tables
1239 % than the plain LaTeX2e tabular which is used here.
1240 \begin{tabular}{|p{2.4cm}p{2.4cm}p{2.4cm}p{0cm}|}
1241 % The virtual last column is here because otherwise we get random syntax errors.
1243 \hline \multicolumn{4}{|c|}{PCA$_3$ --- Old-time player} \\
1244 \centering \begin{psgopartialboard*}{(1,3)(5,7)}
1245 \stone{white}{d}{4}
1246 \stone[\marktr]{black}{c}{6}
1247 \end{psgopartialboard*} &
1248 \centering \begin{psgopartialboard*}{(8,1)(12,5)}
1249 \stone[\marktr]{black}{k}{3}
1250 \end{psgopartialboard*} &
1251 \centering \begin{psgopartialboard*}{(1,1)(5,5)}
1252 \stone[\marktr]{black}{c}{3}
1253 \end{psgopartialboard*} & \\
1254 \centering $0.515$ & \centering $0.264$ & \centering $0.258$ & \\
1255 \centering low corner approach & \centering low side or mokuhazushi opening & \centering san-san opening & \\
1257 \hline \multicolumn{4}{|c|}{PCA$_3$ --- Current player} \\
1258 \centering \begin{psgopartialboard*}{(3,1)(7,5)}
1259 \stone{black}{d}{4}
1260 \stone[\marktr]{black}{f}{3}
1261 \end{psgopartialboard*} &
1262 \centering \begin{psgopartialboard*}{(1,1)(5,5)}
1263 \stone[\marktr]{black}{c}{4}
1264 \end{psgopartialboard*} &
1265 \centering \begin{psgopartialboard*}{(1,2)(5,6)}
1266 \stone{black}{d}{3}
1267 \stone{white}{d}{5}
1268 \stone[\marktr]{black}{c}{5}
1269 \end{psgopartialboard*} & \\
1270 \centering $-0.276$ & \centering $-0.273$ & \centering $-0.116$ & \\
1271 \centering low corner enclosure & \centering 3-4 corner opening \tnote{2} & \centering high approach reply & \\
1273 \hline
1274 \end{tabular}
1275 \begin{tablenotes}
1276 \item [1] We cannot use terms ``classic'' and ``modern'' in case of PCA$_3$
1277 since the current patterns are commonplace in games of past centuries
1278 (not included in our training set) and many would call a lot of the old-time patterns
1279 modern inventions. Perhaps we can infer that the latest 21st-century play trends abandon
1280 many of the 20th-century experiments (lower echelon of our by-year samples)
1281 to return to the more ordinary but effective classic patterns.
1282 \item [2] At this point, we skip two patterns already shown elsewhere:
1283 {\em high side/corner opening} and {\em low corner reply}.
1284 \end{tablenotes}
1285 \end{threeparttable}
1286 \end{table}
1288 It is immediately
1289 obvious both from the measured $r$ and visual observation
1290 that by far the most significant vector corresponds very well
1291 to the territoriality of the players,%
1292 \footnote{Cho Chikun, perhaps the best-known
1293 territorial player, is not well visible in the cluster, but he is
1294 positioned around $-0.8$ on the first dimension.}
1295 confirming the intuitive notion that this aspect of style
1296 is the one easiest to pin-point and also
1297 most obvious in the played shapes and sequences
1298 (that can obviously aim directly at taking secure territory
1299 or building center-oriented framework). Thick (solid) play also plays
1300 a role, but these two style dimensions are already
1301 correlated in the prior data.
1303 The other PCA dimensions are somewhat harder to interpret, but there
1304 certainly is significant influence of the styles on the patterns;
1305 the found correlations are all presented in table \ref{fig:style_r}.
1306 (Larger absolute value means better linear correspondence.)
1308 We also list the characteristic spatial patterns of the PCA dimension
1309 extremes (tables \ref{fig:style_patterns}, \ref{fig:style_patterns3}), determined by their coefficients
1310 in the PCA projection matrix --- however, such naive approach
1311 has limited reliability, better methods will have to be researched.%
1312 \footnote{For example, as one of highly ranked ``Takemiya's'' PCA1 patterns,
1313 3,3 corner opening was generated, completely inappropriately;
1314 it reflects some weak ordering in bottom half of the dimension,
1315 not global ordering within the dimension.}
1316 We do not show the other pattern features since they carry no useful
1317 information in the opening stage.%
1318 \footnote{The board distance feature can be useful in some cases,
1319 but here all the spatial patterns are wide enough to reach to the edge
1320 on their own.}
1322 \begin{table}[!t]
1323 % increase table row spacing, adjust to taste
1324 \renewcommand{\arraystretch}{1.4}
1325 \caption{Covariance Measure of Extended-Normalization PCA and~Prior Information}
1326 \label{fig:style_normr}
1327 \centering
1328 % Some packages, such as MDW tools, offer better commands for making tables
1329 % than the plain LaTeX2e tabular which is used here.
1330 \begin{tabular}{|c||r||r||r||r||r|}
1331 \hline
1332 Eigenval. & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & Year \\
1333 \hline
1334 $6.3774289$ & $ \mathbf{0.436}$ & $-0.220$ & $-0.289$ & $ \mathbf{0.404}$ & $\mathbf{-0.576}$ \\
1335 $1.7269775$ & $\mathbf{-0.690}$ & $ 0.340$ & $ 0.315$ & $\mathbf{-0.445}$ & $\mathbf{-0.639}$ \\
1336 $1.1747101$ & $-0.185$ & $ 0.156$ & $ 0.107$ & $-0.315$ & $ 0.320$ \\
1337 $0.8452797$ & $ 0.064$ & $-0.102$ & $-0.189$ & $ 0.032$ & $ 0.182$ \\
1338 $0.8038992$ & $-0.185$ & $ 0.261$ & $ \mathbf{0.620}$ & $ 0.120$ & $ 0.056$ \\
1339 $0.6679533$ & $-0.027$ & $ 0.055$ & $ 0.147$ & $-0.198$ & $ 0.155$ \\
1340 $0.5790000$ & $ 0.079$ & $ \mathbf{0.509}$ & $ 0.167$ & $ 0.294$ & $-0.019$ \\
1341 $0.4971474$ & $ 0.026$ & $-0.119$ & $-0.071$ & $ 0.049$ & $ 0.043$ \\
1342 $0.4938777$ & $-0.061$ & $ 0.061$ & $ 0.104$ & $-0.168$ & $ 0.015$ \\
1343 $0.4888848$ & $ 0.203$ & $-0.283$ & $-0.120$ & $ 0.083$ & $-0.220$ \\
1344 \hline
1345 \end{tabular}
1346 \end{table}
1348 The PCA results presented above do not show much correlation between
1349 the significant PCA dimensions and the $\omega$ and $\alpha$ style dimensions.
1350 However, when we applied the extended vector normalization
1351 (sec.~\ref{xnorm}; see Table~\ref{fig:style_normr}),
1352 some less significant PCA dimensions exhibited clear correlations.%
1353 \footnote{We have found that $c=6$ in the post-processing logistic function
1354 produces the most instructive PCA output on our particular game collection.}
1355 While we do not use the extended normalization results elsewhere since
1356 they produced noticeably less accurate classifiers in all dimensions,
1357 including $\omega$ and $\alpha$, it is instructive to look at the PCA dimensions.
1359 It appears that less-frequent patterns that appear only in the middle-game
1360 phase\footnote{In the middle game, the board is much more filled and thus
1361 particular specific-shape patterns repeat less often.} are defining
1362 for these dimensions, and these are not represented in the pattern vectors
1363 as well as the common opening patterns. E.g. the most characteristic patterns
1364 on the aggressiveness dimension represent moves that make life with small,
1365 unstable groups (connecting kosumi on second line or mouth-shape eyespace
1366 move), while the novel-ranked players seem to like the (in)famous tsuke-nobi
1367 joseki sequence.
1369 We believe that the next step in interpreting our analytical results
1370 will be more refined prior information input
1371 and precise analysis of the outputs by Go experts.
1373 \begin{figure}[!t]
1374 \centering
1375 \includegraphics[width=3.5in,angle=-90]{sociomap}
1376 \caption{Sociomap visualisation. The spatial positioning of players
1377 is based on the expert knowledge, while the node heights (depicted by
1378 contour lines) represent the pattern vectors.%
1379 %The light lines denote coherence-based hierarchical clusters.
1381 }\label{fig:sociomap}
1382 \end{figure}
1384 Fig.~\ref{fig:sociomap} shows the Sociomap visualisation
1385 as an alternate view of the player relationships and similarity,
1386 as well as correlation between the expert-given style marks
1387 and the PCA decomposition. The four-dimensional style vectors
1388 are used as input for the Sociomap renderer and determine the
1389 spatial positions of players. The height of a node is then
1390 determined using first two PCA dimensions $R_1,R_2$ and their
1391 eigenvalues $\lambda_1,\lambda_2$ as their linear combination:
1392 \[ h = \lambda_1 R_1 + \lambda_2 R_2 \]
1394 We can observe that the terrain of the sociomap is reasonably
1395 ``smooth'', again demonstrating some level of connection between
1396 the style vectors and data-mined information. High contour density
1397 indicates some discrepancy; in case of Takemiya Masaki and Yi Ch'ang-ho,
1398 this seems to be merely an issue of scale,
1399 while the Rui Naiwei --- Gu Li cliff suggests a genuine problem;
1400 we cannot say now whether it is because of imprecise prior information
1401 or lacking approximation abilities of our model.
1403 \subsection{Style Classification}
1405 %TODO vsude zcheckovat jestli pouzivame stejny cas "we perform, we apply" X "we have performed, ..."
1407 Apart from the PCA-based analysis, we tested the style inference ability
1408 of $k$-NN (sec. \ref{knn}), neural network (sec. \ref{neural-net}),
1409 and Bayes (sec.~\ref{naive-bayes}) classifiers.
1411 \subsubsection{Reference (Training) Data}
1412 As the~reference data, we use expert-based knowledge presented in section \ref{style-vectors}.
1413 For each reference player, this gives a $4$-dimensional \emph{style vector} (each component in the
1414 range of $[1,10]$).\footnote{Since the neural network has activation function with range $[-1,1]$, we
1415 have linearly rescaled the \emph{style vectors} from interval $[1,10]$ to $[-1,1]$ before using the training
1416 data. The network's output was afterwards rescaled back to allow for MSE comparison.}
1418 All input (pattern) vectors were preprocessed using PCA, reducing the input dimension from $400$ to $23$.
1419 We measure the performance on the same reference data using $5$-fold cross validation.
1420 To put our measurements in scale, we also include a~random classifier in our results.
1422 \subsubsection{Results}
1423 The results are shown in Table~\ref{crossval-cmp}. The second to fifth columns in the table represent
1424 mean square error (MSE) of the examined styles, $\mathit{Mean}$ is the
1425 mean square error across the styles and finally, the last column $\mathit{Cmp}$
1426 represents $\mathit{Mean}(\mbox{Random classifier}) / \mathit{Mean}(\mathit{X})$ -- comparison of mean square error
1427 of each method with the random classifier. To minimize the
1428 effect of random variables, all numbers were taken as an average of $200$ runs of the cross validation.
1430 Analysis of the performance of $k$-NN classifier for different $k$-values has shown that different
1431 $k$-values are suitable to approximate different styles. Combining the $k$-NN classifiers with the
1432 neural network (so that each style is approximated by the method with lowest MSE in that style)
1433 results in \emph{Joint classifier}, which outperforms all other methods.
1434 The \emph{Joint classifier} has outstanding MSE $3.960$, which is equivalent to standard deviation
1435 of $\sigma = 1.99$ per style.%
1436 \footnote{We should note that the pattern vector for each tested player
1437 was generated over at least a few tens of games.}
1439 \begin{table}[!t]
1440 \renewcommand{\arraystretch}{1.4}
1441 \begin{threeparttable}
1442 \caption{Comparison of style classifiers}
1443 \label{crossval-cmp}
1444 \begin{tabular}{|c|c|c|c|c|c|c|}
1445 \hline
1446 %Classifier & $\sigma_\tau$ & $\sigma_\omega$ & $\sigma_\alpha$ & $\sigma_\theta$ & Tot $\sigma$ & $\mathit{RndC}$\\ \hline
1447 %Neural network & 0.420 & 0.488 & 0.365 & 0.371 & 0.414 & 1.82 \\
1448 %$k$-NN ($k=4$) & 0.394 & 0.507 & 0.457 & 0.341 & 0.429 & 1.76 \\
1449 %Random classifier & 0.790 & 0.773 & 0.776 & 0.677 & 0.755 & 1.00 \\ \hline
1450 &\multicolumn{5}{|c|}{MSE}& \\ \hline
1451 {Classifier} & $\tau$ & $\omega$ & $\alpha$ & $\theta$ & {\bf Mean} & {\bf Cmp}\\ \hline
1452 Joint classifier\tnote{1} & 4.04 & {\bf 5.25} & {\bf 3.52} & {\bf 3.05} & {\bf 3.960}& 2.97 \\\hline
1453 Neural network & {\bf 4.03} & 6.15 & {\bf 3.58} & 3.79 & 4.388 & 2.68 \\
1454 $k$-NN ($k=2$) & 4.08 & 5.40 & 4.77 & 3.37 & 4.405 & 2.67 \\
1455 $k$-NN ($k=3$) & 4.05 & 5.58 & 5.06 & 3.41 & 4.524 & 2.60 \\
1456 $k$-NN ($k=1$) & 4.52 & {\bf 5.26} & 5.36 & {\bf 3.09} & 4.553 & 2.59 \\
1457 $k$-NN ($k=4$) & 4.10 & 5.88 & 5.16 & 3.60 & 4.684 & 2.51 \\
1458 Naive Bayes & 4.48 & 6.90 & 5.48 & 3.70 & 5.143 & 2.29 \\
1459 Random class. & 12.26 & 12.33 & 12.40 & 10.11 & 11.776 & 1.00 \\\hline
1461 \end{tabular}
1462 \begin{tablenotes}
1463 \item [1] Note that these measurements have a certain variance.
1464 Since the Joint classifier performance was measured from scratch,
1465 the precise values may not match appropriate cells of the used methods.
1466 \end{tablenotes}
1467 \end{threeparttable}
1468 \end{table}
1470 % TODO presunout konkretni parametry do Appendixu? (neni jich tolik, mozna ne)
1471 \subsubsection{$k$-NN parameters}
1472 All three variants of $k$-NN classifier ($k=2,3,4$) had the weight function
1473 \begin{equation}
1474 \mathit{Weight}(\vec x) = 0.8^{10\cdot\mathit{Distance}(\vec x)}
1475 \end{equation}
1476 The parameters were chosen empirically to minimize the MSE.
1478 \subsubsection{Neural network's parameters}
1479 The neural network classifier had three-layered architecture (one hidden layer)
1480 comprising the following numbers of neurons:
1481 \vspace{4mm}
1482 \noindent
1483 %\begin{table}
1484 \begin{center}
1485 %\caption{Styles}
1486 \begin{tabular}{|c|c|c|}
1487 \hline
1488 \multicolumn{3}{|c|}{Layer} \\\hline
1489 Input & Hidden & Output \\ \hline
1490 23 & 30 & 4 \\ \hline
1491 \end{tabular}
1492 \end{center}
1493 %\end{table}
1494 \vspace{4mm}
1496 The network was trained until the square error on the training set was smaller than $0.0003$.
1497 Due to a small number of input vectors, this only took $20$ iterations of RPROP learning algorithm on average.
1499 \subsubsection{Naive Bayes parameters}
1501 We have chosen $k = 10/7$ as our discretization parameter;
1502 ideally, we would use $k = 1$ to fully cover the style marks
1503 domain, however our training sample is apparently too small for
1504 that.
1506 \section{Proposed Applications}
1508 We believe that our findings might be useful for many applications
1509 in the area of Go support software as well as Go-playing computer engines.
1511 The style analysis can be an excellent teaching aid --- classifying style
1512 dimensions based on player's pattern vector, many study recommendations
1513 can be given, e.g. about the professional games to replay, the goal being
1514 balancing understanding of various styles to achieve well-rounded skill set.
1515 This was also our original aim when starting the research and a user-friendly
1516 tool based on our work is now being created.
1518 We hope that more strong players will look into the style dimensions found
1519 by our statistical analysis --- analysis of most played patterns of prospective
1520 opponents might help to prepare for the game, but we especially hope that new insights
1521 on strategic purposes of various shapes and general human understanding
1522 of the game might be achieved by investigating the style-specific patterns.
1523 From time to time, new historical game records are still being discovered;
1524 pattern-based classification might help to suggest origin of the games
1525 in Go Archeology.
1527 Classifying playing strength of a pattern vector of a player can be used
1528 e.g. to help determine initial real-world rating of a player before their
1529 first tournament based on games played on the internet; some players especially
1530 in less populated areas could get fairly strong before playing their first
1531 real tournament.
1533 Analysis of pattern vectors extracted from games of Go-playing programs
1534 in light of the shown strength and style distributions might help to
1535 highlight some weaknesses and room for improvements. (However, since
1536 correlation does not imply causation, simply optimizing Go-playing programs
1537 according to these vectors is unlikely to yield good results.)
1538 Another interesting application in Go-playing programs might be strength
1539 adjustment; the program can classify the player's level based on the pattern
1540 vector from its previous games and auto-adjust its difficulty settings
1541 accordingly to provide more even games for beginners.%
1542 \footnote{The program can also do this based on win-loss statistics,
1543 but pattern vector analysis might converge faster.}
1546 % An example of a floating figure using the graphicx package.
1547 % Note that \label must occur AFTER (or within) \caption.
1548 % For figures, \caption should occur after the \includegraphics.
1549 % Note that IEEEtran v1.7 and later has special internal code that
1550 % is designed to preserve the operation of \label within \caption
1551 % even when the captionsoff option is in effect. However, because
1552 % of issues like this, it may be the safest practice to put all your
1553 % \label just after \caption rather than within \caption{}.
1555 % Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
1556 % option should be used if it is desired that the figures are to be
1557 % displayed while in draft mode.
1559 %\begin{figure}[!t]
1560 %\centering
1561 %\includegraphics[width=2.5in]{myfigure}
1562 % where an .eps filename suffix will be assumed under latex,
1563 % and a .pdf suffix will be assumed for pdflatex; or what has been declared
1564 % via \DeclareGraphicsExtensions.
1565 %\caption{Simulation Results}
1566 %\label{fig_sim}
1567 %\end{figure}
1569 % Note that IEEE typically puts floats only at the top, even when this
1570 % results in a large percentage of a column being occupied by floats.
1573 % An example of a double column floating figure using two subfigures.
1574 % (The subfig.sty package must be loaded for this to work.)
1575 % The subfigure \label commands are set within each subfloat command, the
1576 % \label for the overall figure must come after \caption.
1577 % \hfil must be used as a separator to get equal spacing.
1578 % The subfigure.sty package works much the same way, except \subfigure is
1579 % used instead of \subfloat.
1581 %\begin{figure*}[!t]
1582 %\centerline{\subfloat[Case I]\includegraphics[width=2.5in]{subfigcase1}%
1583 %\label{fig_first_case}}
1584 %\hfil
1585 %\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}%
1586 %\label{fig_second_case}}}
1587 %\caption{Simulation results}
1588 %\label{fig_sim}
1589 %\end{figure*}
1591 % Note that often IEEE papers with subfigures do not employ subfigure
1592 % captions (using the optional argument to \subfloat), but instead will
1593 % reference/describe all of them (a), (b), etc., within the main caption.
1596 % An example of a floating table. Note that, for IEEE style tables, the
1597 % \caption command should come BEFORE the table. Table text will default to
1598 % \footnotesize as IEEE normally uses this smaller font for tables.
1599 % The \label must come after \caption as always.
1601 %\begin{table}[!t]
1602 %% increase table row spacing, adjust to taste
1603 %\renewcommand{\arraystretch}{1.3}
1604 % if using array.sty, it might be a good idea to tweak the value of
1605 % \extrarowheight as needed to properly center the text within the cells
1606 %\caption{An Example of a Table}
1607 %\label{table_example}
1608 %\centering
1609 %% Some packages, such as MDW tools, offer better commands for making tables
1610 %% than the plain LaTeX2e tabular which is used here.
1611 %\begin{tabular}{|c||c|}
1612 %\hline
1613 %One & Two\\
1614 %\hline
1615 %Three & Four\\
1616 %\hline
1617 %\end{tabular}
1618 %\end{table}
1621 % Note that IEEE does not put floats in the very first column - or typically
1622 % anywhere on the first page for that matter. Also, in-text middle ("here")
1623 % positioning is not used. Most IEEE journals use top floats exclusively.
1624 % Note that, LaTeX2e, unlike IEEE journals, places footnotes above bottom
1625 % floats. This can be corrected via the \fnbelowfloat command of the
1626 % stfloats package.
1630 \section{Future Research}
1632 Since we are not aware of any previous research on this topic and we
1633 are limited by space and time constraints, plenty of research remains
1634 to be done, in all parts of our analysis --- we have already noted
1635 many in the text above. Most significantly, different methods of generating
1636 and normalizing the $\vec p$ vectors can be explored
1637 and other data mining methods could be investigated.
1638 Better ways of visualising the relationships would be desirable,
1639 together with thorough expert dissemination of internal structure
1640 of the player pattern vectors space:
1641 more professional players should be consulted on the findings
1642 and for style scales calibration.
1644 It can be argued that many players adjust their style by game conditions
1645 (Go development era, handicap, komi and color, time limits, opponent)
1646 or that styles might express differently in various game stages;
1647 these factors should be explored by building pattern vectors more
1648 carefully than by simply considering all moves in all games of a player.
1649 Impact of handicap and uneven games on by-strength
1650 $\vec p$ distribution should be also investigated.
1652 % TODO: Future research --- Sparse PCA
1654 \section{Conclusion}
1655 We have proposed a way to extract summary pattern information from
1656 game collections and combined this with various data mining methods
1657 to show correspondence of our pattern summaries with various player
1658 meta-information like playing strength, era of play or playing style
1659 as ranked by expert players. We have implemented and measured our
1660 proposals in two case studies: per-rank characteristics of amateur
1661 players and per-player style/era characteristics of well-known
1662 professionals.
1664 While many details remain to be worked out,
1665 we have demonstrated that many significant correlations do exist and
1666 it is practically viable to infer the player meta-information from
1667 extracted pattern summaries. We proposed a wide range of applications
1668 for such inference. Finally, we outlined some of the many possible
1669 directions of future work in this newly staked research field
1670 on the boundary of Computer Go, Data Mining and Go Theory.
1673 % if have a single appendix:
1674 %\appendix[Proof of the Zonklar Equations]
1675 % or
1676 %\appendix % for no appendix heading
1677 % do not use \section anymore after \appendix, only \section*
1678 % is possibly needed
1680 % use appendices with more than one appendix
1681 % then use \section to start each appendix
1682 % you must declare a \section before using any
1683 % \subsection or using \label (\appendices by itself
1684 % starts a section numbered zero.)
1688 %\appendices
1689 %\section{Proof of the First Zonklar Equation}
1690 %Appendix one text goes here.
1692 %% you can choose not to have a title for an appendix
1693 %% if you want by leaving the argument blank
1694 %\section{}
1695 %Appendix two text goes here.
1698 % use section* for acknowledgement
1699 \section*{Acknowledgment}
1700 \label{acknowledgement}
1702 Foremostly, we are very grateful for detailed input on specific go styles
1703 by Alexander Dinerstein, Motoki Noguchi and V\'{i}t Brunner.
1704 We appreciate helpful comments on our general methodology
1705 by John Fairbairn, T. M. Hall, Cyril H\"oschl, Robert Jasiek, Franti\v{s}ek Mr\'{a}z
1706 and several GoDiscussions.com users. \cite{GoDiscThread}
1707 Finally, we would like to thank Radka ``chidori'' Hane\v{c}kov\'{a}
1708 for the original research idea and acknowledge major inspiration
1709 by R\'{e}mi Coulom's paper \cite{PatElo} on the extraction of pattern information.
1712 % Can use something like this to put references on a page
1713 % by themselves when using endfloat and the captionsoff option.
1714 \ifCLASSOPTIONcaptionsoff
1715 \newpage
1720 % trigger a \newpage just before the given reference
1721 % number - used to balance the columns on the last page
1722 % adjust value as needed - may need to be readjusted if
1723 % the document is modified later
1724 %\IEEEtriggeratref{8}
1725 % The "triggered" command can be changed if desired:
1726 %\IEEEtriggercmd{\enlargethispage{-5in}}
1728 % references section
1730 % can use a bibliography generated by BibTeX as a .bbl file
1731 % BibTeX documentation can be easily obtained at:
1732 % http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
1733 % The IEEEtran BibTeX style support page is at:
1734 % http://www.michaelshell.org/tex/ieeetran/bibtex/
1735 \bibliographystyle{IEEEtran}
1736 % argument is your BibTeX string definitions and bibliography database(s)
1737 \bibliography{gostyle}
1739 % <OR> manually copy in the resultant .bbl file
1740 % set second argument of \begin to the number of references
1741 % (used to reserve space for the reference number labels box)
1742 %\begin{thebibliography}{1}
1744 %\bibitem{MasterMCTS}
1746 %\end{thebibliography}
1748 % biography section
1750 % If you have an EPS/PDF photo (graphicx package needed) extra braces are
1751 % needed around the contents of the optional argument to biography to prevent
1752 % the LaTeX parser from getting confused when it sees the complicated
1753 % \includegraphics command within an optional argument. (You could create
1754 % your own custom macro containing the \includegraphics command to make things
1755 % simpler here.)
1756 %\begin{biography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{mshell}}]{Michael Shell}
1757 % or if you just want to reserve a space for a photo:
1759 %\begin{IEEEbiography}{Petr Baudi\v{s}}
1760 %Biography text here.
1761 %\end{IEEEbiography}
1763 % if you will not have a photo at all:
1764 \begin{IEEEbiographynophoto}{Petr Baudi\v{s}}
1765 Received BSc degree in Informatics at Charles University, Prague in 2009,
1766 currently a graduate student.
1767 Doing research in the fields of Computer Go, Monte Carlo Methods
1768 and Version Control Systems.
1769 Plays Go with the rank of 2-kyu on European tournaments
1770 and 2-dan on the KGS Go Server.
1771 \end{IEEEbiographynophoto}
1773 \begin{IEEEbiographynophoto}{Josef Moud\v{r}\'{i}k}
1774 Received BSc degree in Informatics at Charles University, Prague in 2009,
1775 currently a graduate student.
1776 Doing research in the fields of Neural Networks and Cognitive Sciences.
1777 His Go skills are not worth mentioning.
1778 \end{IEEEbiographynophoto}
1780 % insert where needed to balance the two columns on the last page with
1781 % biographies
1782 %\newpage
1784 %\begin{IEEEbiographynophoto}{Jane Doe}
1785 %Biography text here.
1786 %\end{IEEEbiographynophoto}
1788 % You can push biographies down or up by placing
1789 % a \vfill before or after them. The appropriate
1790 % use of \vfill depends on what kind of text is
1791 % on the last page and whether or not the columns
1792 % are being equalized.
1794 %\vfill
1796 % Can be used to pull up biographies so that the bottom of the last one
1797 % is flush with the other column.
1798 %\enlargethispage{-5in}
1802 % that's all folks
1803 \end{document}