| History-class {TrueSkillThroughTime} | R Documentation |
History
Description
History class
Arguments
composition |
A list of lists of players' names (id). Each position of the list is a list that represents the teams of a game, so the latter must contain vectors of names representing the composition of each team in that game. |
results |
A list of numeric vectors, representing the outcome of each game. It must have the same length as the composition list. |
times |
A numeric vector, the timestamp of each game. It must have the same length as the composition list. |
priors |
A hash object, a dictionary of Player objects indexed by the players' name (id). |
mu |
A number, the prior mean. The default value is: MU. |
sigma |
A number, the prior standard deviation. The default value is: SIGMA. |
beta |
A number, the standard deviation of the performance. The default value is: BETA. |
gamma |
A number, the amount of uncertainty (standard deviation) added to the estimates between events. The default value is: GAMMA. |
p_draw |
A number, the probability of a draw. The default value is P_DRAW. |
epsilon |
A number, the convergence threshold. Used to stop the convergence procedure. The default value is EPSILON. |
iterations |
A number, the maximum number of iterations for convergence. Used to stop the convergence procedure. The default value is ITERATIONS. |
Value
History object
Fields
size: A number, the amount of games.
batches: A vector of Batch objects, where the games that occur at the same timestamp live.
agents: A hash, a dictionary indexed by the players' name (id).
time: A boolean, indicating whether the history was initialized with timestamps or not.
mu: A number, the default prior mean in this particular History object.
sigma: A number, the default prior standard deviation in this particular History object.
beta: A number, the default standard deviation of the performance in this particular History object.
gamma: A number, the default dynamic uncertainty in this particular History object.
p_draw: A number, the probability of a draw in this particular History object.
h_epsilon: A number, the convergence threshold in this particular History object.
h_iterations: A number, the maximum number of iterations for convergence in this particular History object.
Methods
convergence(epsilon = NA, iterations = NA, verbose = TRUE)
initialize( composition, results = list(), times = c(), priors = hash(), mu = MU, sigma = SIGMA, beta = BETA, gamma = GAMMA, p_draw = P_DRAW, epsilon = EPSILON, iterations = ITERATIONS )
learning_curves()
log_evidence()
Examples
# Three two-player games: a vs b, b vs c, c vs a.
game_1 <- list(c("a"), c("b"))
game_2 <- list(c("b"), c("c"))
game_3 <- list(c("c"), c("a"))
composition <- list(game_1, game_2, game_3)
# With gamma = 0 no dynamic uncertainty is added between events.
h <- History(composition, gamma = 0.0)
# Learning curves before running the convergence procedure.
trueskill_learning_curves <- h$learning_curves()
ts_a <- trueskill_learning_curves[["a"]]
ts_a[[1]]$N; ts_a[[2]]$N   # estimates for player "a"
ts_a[[1]]$t; ts_a[[2]]$t   # timestamps of those estimates
# Run the TrueSkill Through Time convergence procedure and compare.
h$convergence()
trueskill_through_time_curves <- h$learning_curves()
ttt_a <- trueskill_through_time_curves[["a"]]
ttt_a[[1]]$N; ttt_a[[2]]$N
ttt_a[[1]]$t; ttt_a[[2]]$t
## Not run:
# Synthetic example: player "a" improves over time following a logistic
# learning curve, facing a fresh opponent of known skill at every step.
library(hash)
N <- 100
# Logistic skill curve: rises from 0 to `maximum`, centered at `middle`,
# with steepness `slope`.
skill <- function(experience, middle, maximum, slope) {
  maximum / (1 + exp(slope * (-experience + middle)))
}
target <- skill(seq_len(N), N / 2, 2, 0.075)  # true skill of "a" at each step
opponents <- rnorm(N, target, 0.5)            # each opponent's true skill
# One game per step: "a" versus opponent `i` (named by its index).
composition <- lapply(seq_len(N), function(i) list(c("a"), c(toString(i))))
# Simulate outcomes from noisy performances (sd = 1): c(1, 0) means the
# first team ("a") wins, c(0, 1) means the opponent wins.
results <- lapply(seq_len(N), function(i) {
  if (rnorm(1, target[i]) > rnorm(1, opponents[i])) c(1, 0) else c(0, 1)
})
times <- seq_len(N)  # one timestamp per game
# Pin each opponent's prior near its known skill (small sigma = 0.2).
priors <- hash()
for (i in seq_len(N)) {
  priors[[toString(i)]] <- Player(Gaussian(opponents[i], 0.2))
}
h <- History(composition, results, times, priors, gamma = 0.1)
h$convergence()
# Compare the posterior means of "a"'s learning curve with the true curve.
lc_a <- h$learning_curves()$a
mu <- vapply(lc_a, function(tp) tp[[2]]@mu, numeric(1))
plot(target)
lines(mu)
# Plotting learning curves
# First solve your own example. Here is a dummy one: 500 random
# head-to-head games among five agents.
agents <- c("a", "b", "c", "d", "e")
n_games <- 500
composition <- lapply(seq_len(n_games), function(i) {
  who <- sample(agents, 2)
  list(list(who[1]), list(who[2]))
})
h <- History(composition = composition, gamma = 0.03, sigma = 1.0)
h$convergence(iterations = 6)
# Then plot some learning curves (first three agents only).
lc <- h$learning_curves()
colors <- c(rgb(0.2, 0.2, 0.8), rgb(0.2, 0.8, 0.2), rgb(0.8, 0.2, 0.2))
colors_alpha <- c(
  rgb(0.2, 0.2, 0.8, 0.2), rgb(0.2, 0.8, 0.2, 0.2), rgb(0.8, 0.2, 0.2, 0.2)
)
plot(0, 0, xlim = c(0, n_games), ylim = c(-1, 1),
     xlab = "t", ylab = "skill", type = "n")
for (i in seq_len(3)) {
  curve <- lc[[agents[i]]]
  # Extract time, mean and sd of each estimate without growing vectors.
  t <- vapply(curve, function(x) x$t, numeric(1))
  mu <- vapply(curve, function(x) x$N@mu, numeric(1))
  sigma <- vapply(curve, function(x) x$N@sigma, numeric(1))
  lines(t, mu, col = colors[i], lwd = 2, type = "l")
  # Shade a one-sigma uncertainty band around the mean.
  polygon(c(t, rev(t)), c(mu + sigma, rev(mu - sigma)),
          col = colors_alpha[i], border = NA)
}
legend("topright", legend = agents[1:3], col = colors, lwd = 2)
## End(Not run)