@@ -4,3 +4,4 @@
.Rproj.user
/.RData
.Rbuildignore
data/ad
Package: queueingnetworkR
Title: What the Package Does (one line, title case)
Version: 0.0.0.9000
[email protected]: person("First", "Last", email = "[email protected]", role = c("aut", "cre"))
Description: What the package does (one paragraph).
Depends: R (>= 3.4.3)
License: What license is it under?
Title: Simulation and Estimation of a Geom/Geom/infty Tandem Network
Version: 0.1.0
[email protected]: person("Sebastian", "Schweer", email = "[email protected]", role = c("aut", "cre"))
Description: Simulation and Documentation for the Paper "Nonparametric Estimation of the Service Time Distribution in Discrete-Time Queueing Networks" by Cornelia Wichelhaus and Sebastian Schweer
Depends: R (>= 3.4.2), utils, stats, ggplot2, plyr, reshape2
License: MIT + file LICENSE
Encoding: UTF-8
LazyData: true
Suggests:
testthat
RoxygenNote: 6.0.1
RoxygenNote: 6.1.0
# Generated by roxygen2: do not edit by hand
export(present_estimates)
export(queueing_network_poi_geom)
importFrom(ggplot2,aes)
importFrom(ggplot2,geom_step)
importFrom(ggplot2,ggplot)
importFrom(ggplot2,ggtitle)
importFrom(ggplot2,theme_bw)
importFrom(ggplot2,ylab)
importFrom(ggplot2,ylim)
importFrom(plyr,adply)
importFrom(reshape2,melt)
importFrom(stats,dgeom)
importFrom(stats,pgeom)
importFrom(stats,rbinom)
importFrom(stats,rgeom)
importFrom(stats,rpois)
importFrom(utils,head)
importFrom(utils,setTxtProgressBar)
importFrom(utils,tail)
importFrom(utils,txtProgressBar)
cross_covariance <- function(data, node_1, node_2, lag, nobs){
if(is.null(nobs)){
nobs = nrow(data)
}
if(!is.null(nobs) & nobs > nrow(data)){
nobs = nrow(data)
}
if(lag < nobs){
if(node_1 == 1){
arrival = data$arrival_1
} else if (node_1 ==2){
arrival = data$arrival_1
} else {
message("Arrival node not contained in the model")
}
if(node_2 == 1){
departure = data$departure_1
} else if (node_2 ==2){
departure = data$departure_1
} else {
message("Departure node not contained in the model")
}
arrival_k = head(arrival, (nobs - lag))
departure_k = tail(departure, (nobs - lag))
### Following formula (3) in Schweer/Wichelhaus
estimator = 1/nobs*(sum(arrival_k*departure_k) - sum(arrival_k)*mean(departure) - sum(departure_k)*mean(arrival) + mean(arrival)*(departure))
} else {
estimator = 0
}
return(estimator)
}
# Following Lemma 9 in Edelmann/Wichelhaus
deconvoluting_alpha <- function(number_of_steps, alpha_1, alpha_2, p_12, p_21){
M_n = min(which(alpha_1 > 0))
if(is.finite(M_n)){
length_1 = length(alpha_1) - M_n + 1
length_2 = length(alpha_2) - M_n
max_num_steps = min(length_1, length_2, number_of_steps)
if(max_num_steps > 1){
v_n = numeric(length = max_num_steps)
v_n[1] = (1 - p_12)* alpha_2[M_n + 1] / (alpha_1[M_n] * (1 - p_21) * p_12)
for(idx in c(2:max_num_steps)){
alpha_1_k = alpha_1[(M_n + idx -1):(M_n + 1)]
v_n[idx] = (1 - p_12)* alpha_2[M_n + idx] / (alpha_1[M_n] * (1 - p_21) * p_12) - (sum(alpha_1_k*v_n[1:(idx-1)]))/alpha_1[M_n]
}
return(v_n)
} else {
message("Not enough data points for deconvolution")
}
} else {
message("Alpha Estimator consists of 0 values")
}
}
cross_covariance <- function(data, node_1, node_2, lag, nobs){
if(is.null(nobs)){
nobs = nrow(data)
}
if(!is.null(nobs) & nobs > nrow(data)){
nobs = nrow(data)
}
if(lag < nobs){
if(node_1 == 1){
arrival = data$arrival_1
} else if (node_1 ==2){
arrival = data$arrival_2
} else {
message("Arrival node not contained in the model")
}
if(node_2 == 1){
departure = data$departure_1
} else if (node_2 ==2){
departure = data$departure_2
} else {
message("Departure node not contained in the model")
}
arrival_k = head(arrival, (nobs - lag))
departure_k = tail(departure, (nobs - lag))
### Following formula (3) in Schweer/Wichelhaus
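### i.e. the lag-"lag" empirical cross-covariance
### (1/nobs) * sum_{t=1}^{nobs-lag} (arrival_t - mean(arrival)) * (departure_{t+lag} - mean(departure)),
### expanded so that the full-sample means of arrival and departure are used throughout.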
estimator = 1/nobs*(sum(arrival_k*departure_k)) - 1/nobs*sum(arrival_k)*mean(departure) - 1/nobs*sum(departure_k)*mean(arrival) + (nobs - lag)/nobs*mean(arrival)*mean(departure)
} else {
estimator = 0
}
return(estimator)
}
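# Illustrative sketch (not taken from the package tests; the toy values are made up):
# cross_covariance() expects a data frame with columns arrival_1, arrival_2,
# departure_1 and departure_2, e.g. the output of queueing_network_poi_geom().
#
#   toy <- data.frame(arrival_1   = c(1, 0, 2, 1),
#                     arrival_2   = c(0, 1, 0, 0),
#                     departure_1 = c(0, 1, 1, 2),
#                     departure_2 = c(0, 0, 1, 1))
#   cross_covariance(toy, node_1 = 1, node_2 = 2, lag = 1, nobs = NULL)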
#' Deconvolute the Estimator to Obtain Service Time Distribution
#'
#' Implementation of the theoretical result of Lemma 9 in Edelmann, Wichelhaus
#'
#' @param number_of_steps Integer, number of steps in deconvolution.
#' @param alpha_1 Numeric vector, the first cross-covariance estimator entering Lemma 9
#' (in estimate_g2: arrivals at node 1 vs. departures at node 1).
#' @param alpha_2 Numeric vector, estimator "hat" alpha_n^2(...) in Lemma 9
#' (in estimate_g2: arrivals at node 1 vs. departures at node 2).
#' @param p_12 Numeric, in [0,1], routing probability from node 1 to node 2
#' @param p_21 Numeric, in [0,1], routing probability from node 2 to node 1
#' @return Numeric vector, corresponding to "hat" v_n(...) in Lemma 9.
deconvoluting_alpha <- function(number_of_steps, alpha_1, alpha_2, p_12, p_21){
M_n = min(which(alpha_1 > 0))
if(is.finite(M_n)){
length_1 = length(alpha_1) - M_n + 1
length_2 = length(alpha_2) - M_n
max_num_steps = min(length_1, length_2, number_of_steps)
if(max_num_steps > 1){
v_n = numeric(length = max_num_steps)
v_n[1] = (1 - p_12)* alpha_2[M_n + 1] / (alpha_1[M_n] * (1 - p_21) * p_12)
for(idx in c(2:max_num_steps)){
alpha_1_k = alpha_1[(M_n + idx -1):(M_n + 1)]
v_n[idx] = (1 - p_12)* alpha_2[M_n + idx] / (alpha_1[M_n] * (1 - p_21) * p_12) - (sum(alpha_1_k*v_n[1:(idx-1)]))/alpha_1[M_n]
}
return(v_n)
} else {
message("Not enough data points for deconvolution")
}
} else {
message("Alpha Estimator consists of 0 values")
}
}
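# Small worked example (mirroring the unit tests): with alpha_1 = c(2, 0.3),
# alpha_2 = c(1, 1, 1) and p_12 = p_21 = 0.5 the recursion gives
#   v_1 = 0.5 * 1 / (2 * 0.5 * 0.5)                 = 1
#   v_2 = 0.5 * 1 / (2 * 0.5 * 0.5) - 0.3 * 1 / 2   = 0.85
# so deconvoluting_alpha(2, c(2, 0.3), c(1, 1, 1), p_12 = 0.5, p_21 = 0.5)
# returns c(1, 0.85).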
\ No newline at end of file
estimate_g2 <- function(max_lag, p_12, p_21, ...){
data = queueing_network_poi_geom(p_12 = p_12, p_21 = p_21, ...)
alpha_12_hat = sapply(c(1:max_lag),
cross_covariance,
data = data, node_1 = 1, node_2 = 2, nobs = NULL)
alpha_11_hat = sapply(c(1:max_lag),
cross_covariance,
data = data, node_1 = 1, node_2 = 1, nobs = NULL)
g_2_est = deconvoluting_alpha(max_lag,
alpha_1 = alpha_11_hat,
alpha_2 = alpha_12_hat,
p_12 = p_12,
p_21 = p_21)
return(g_2_est)
}
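# Usage sketch (parameters as in the package tests; the result is simulation based,
# so the estimated probabilities vary from run to run). For max_lag = 10 the
# returned vector typically has length max_lag - 1 = 9:
#
#   g2_hat <- estimate_g2(10, p_12 = 0.1, p_21 = 0.2, n_obs = 1000, burn_in = 1000,
#                         lambda_1 = 1, lambda_2 = 0.3, G_1 = 0.3, G_2 = 0.9,
#                         progress = FALSE)
#   length(g2_hat)  # 9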
#' Helper Function to Apply Theorem 4 in Edelmann, Wichelhaus
#'
#' Creates a cdf estimator from a pgf estimator.
#'
#' @param g2_est Numeric vector, the pgf estimation of the service
#' time distribution
#'
estimate_G2 <- function(g2_est){
trunc_g2_est = ifelse(g2_est > 0, g2_est, 0)
G2_est_hat = cumsum(trunc_g2_est)
trunc_G2_est_hat = ifelse(G2_est_hat < 1, G2_est_hat, 1)
return(trunc_G2_est_hat)
}
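# Small worked example (taken from the unit tests): negative pgf estimates are set
# to 0 before the cumulative sum and the resulting cdf is capped at 1, so
#   estimate_G2(c(0.3, 0.2, 0.4, 0.8, 0.1))  # returns c(0.3, 0.5, 0.9, 1, 1)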
\ No newline at end of file
# library(queueingnetworkR)
#
# distribution1 <-
# replicate(1, {
# p_12 = 0.3
# p_21 = 0.4
# lambda_1 = 0.25
# lambda_2 = 0.4
# G_1 = 0.3
# G_2 = 0.4
#
# qn_1 <- queueing_network_poi_geom(100000, 50000, p_12 = p_12, p_21 = p_21,
# lambda_1 = lambda_1, lambda_2 = lambda_2,
# G_1 = G_1, G_2 = G_2, progress = TRUE)
#
# alpha_1_1 = queueingnetworkR:::cross_covariance(qn_1, node_1 = 1, node_2 = 1, lag = 1, nobs = NULL)
# return(alpha_1_1*(1 - p_12*p_21)/(lambda_1*(1-p_12)))
# })
#
# distribution2 <-
# replicate(20, {
# p_12 = 0.3
# p_21 = 0.4
# lambda_1 = 0.2
# lambda_2 = 0.6
# G_1 = 0.8
# G_2 = 0.2
#
# qn_1 <- queueing_network_poi_geom(1000, 1000, p_12 = p_12, p_21 = p_21,
# lambda_1 = lambda_1, lambda_2 = lambda_2,
# G_1 = G_1, G_2 = G_2, progress = TRUE)
#
# alpha_1_1 = queueingnetworkR:::cross_covariance(qn_1, node_1 = 1, node_2 = 2, lag = 1, nobs = NULL)
# return(alpha_1_1*(1 - p_12*p_21)/(lambda_1*(1-p_12)))
# })
# distribution3 <-
# replicate(20, {
# p_12 = 0.3
# p_21 = 0.4
# lambda_1 = 0.2
# lambda_2 = 0.6
# G_1 = 0.8
# G_2 = 0.2
#
# qn_1 <- queueing_network_poi_geom(1000, 1000, p_12 = p_12, p_21 = p_21,
# lambda_1 = lambda_1, lambda_2 = lambda_2,
# G_1 = G_1, G_2 = G_2, progress = TRUE)
#
# alpha_1_1 = queueingnetworkR:::cross_covariance(qn_1, node_1 = 2, node_2 = 1, lag = 1, nobs = NULL)
# return(alpha_1_1*(1 - p_12*p_21)/(lambda_1*(1-p_12)))
# })
# distribution4 <-
# replicate(20, {
# p_12 = 0.3
# p_21 = 0.4
# lambda_1 = 0.2
# lambda_2 = 0.6
# G_1 = 0.8
# G_2 = 0.2
#
# qn_1 <- queueing_network_poi_geom(1000, 1000, p_12 = p_12, p_21 = p_21,
# lambda_1 = lambda_1, lambda_2 = lambda_2,
# G_1 = G_1, G_2 = G_2, progress = TRUE)
#
# alpha_1_1 = queueingnetworkR:::cross_covariance(qn_1, node_1 = 2, node_2 = 2, lag = 1, nobs = NULL)
# return(alpha_1_1*(1 - p_12*p_21)/(lambda_1*(1-p_12)))
# })
# distribution5 <-
# replicate(20, {
# p_12 = 0.3
# p_21 = 0.4
# lambda_1 = 0.2
# lambda_2 = 0.6
# G_1 = 0.8
# G_2 = 0.2
#
# qn_1 <- queueing_network_poi_geom(1000, 1000, p_12 = p_12, p_21 = p_21,
# lambda_1 = lambda_1, lambda_2 = lambda_2,
# G_1 = G_1, G_2 = G_2, progress = TRUE)
#
# alpha_1_1 = queueingnetworkR:::cross_covariance(qn_1, node_1 = 1, node_2 = 2, lag = 2, nobs = NULL)
# return(alpha_1_1*(1 - p_12*p_21)/(lambda_1*(1-p_12)))
# })
# distribution6 <-
# replicate(20, {
# p_12 = 0.3
# p_21 = 0.4
# lambda_1 = 0.2
# lambda_2 = 0.6
# G_1 = 0.8
# G_2 = 0.2
#
# qn_1 <- queueing_network_poi_geom(1000, 1000, p_12 = p_12, p_21 = p_21,
# lambda_1 = lambda_1, lambda_2 = lambda_2,
# G_1 = G_1, G_2 = G_2, progress = TRUE)
#
# alpha_1_1 = queueingnetworkR:::cross_covariance(qn_1, node_1 = 1, node_2 = 2, lag = 3, nobs = NULL)
# return(alpha_1_1*(1 - p_12*p_21)/(lambda_1*(1-p_12)))
# })
#
# distribution7 <-
# replicate(20, {
# p_12 = 0.3
# p_21 = 0.4
# lambda_1 = 0.2
# lambda_2 = 0.6
# G_1 = 0.8
# G_2 = 0.2
#
# qn_1 <- queueing_network_poi_geom(1000, 1000, p_12 = p_12, p_21 = p_21,
# lambda_1 = lambda_1, lambda_2 = lambda_2,
# G_1 = G_1, G_2 = G_2, progress = TRUE)
#
# alpha_1_1 = queueingnetworkR:::cross_covariance(qn_1, node_1 = 1, node_2 = 1, lag = 1, nobs = NULL)
# return(alpha_1_1*(1 - p_12*p_21)/(lambda_1*(1-p_12)))
# })
#' Presents the Results of the Cross-Covariance Estimation
#'
#' Given a parametrization of a tandem network, this function produces a number
#' of repetitions of simulation and estimation of the service time distribution G2 at the
#' second node, and presents the results as data frames and a plot.
#'
#' @param n_reps Integer, number of repetitions of simulation and estimation to
#' be calculated.
#' @param max_lag Integer, length of the estimated service time distribution.
#' @param G_2 Numeric, in [0,1], parameter of the (Geometric) service time
#' distribution at node 2, used for the theoretical reference distribution.
#' @param ... Parameters passed on to the function estimate_g2.
#' @return A list: result_df is the data frame containing the raw estimators of g_2 with
#' a column rep_id for each repetition, result_G2_df contains the corresponding
#' distribution function estimates (with the true distribution prepended as "True Dist"),
#' and plot_result_G2 is a ggplot of the estimated distribution functions.
#' @importFrom ggplot2 ggplot geom_step aes ylim theme_bw ggtitle ylab
#' @importFrom plyr adply
#' @importFrom reshape2 melt
#' @importFrom stats dgeom pgeom
#' @export
present_estimates <- function(n_reps, max_lag, G_2, ...){
index_set = c(1:n_reps)
result_set = sapply(index_set,
function(x){cat("\nRun", x, "of", n_reps, "\n")
return(estimate_g2(max_lag, G_2, ...))},
simplify = TRUE)
result_df = as.data.frame(t(result_set))
colnames(result_df) = paste0("est_g2_", c(1:(max_lag - 1)))
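# adply() on a data frame returns the original max_lag - 1 estimator columns followed
# by the estimate_G2 output columns; the selection below keeps only the appended cdf columns.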
result_G2_df = adply(result_df, 1, estimate_G2)[, c((max_lag):(2*(max_lag - 1)))]
# Theoretical distributions
g_2_true = as.numeric(dgeom(c(0:(max_lag - 2)), G_2))
G_2_true = as.numeric(pgeom(c(0:(max_lag - 2)), G_2))
# Append the true distribution to plot it
result_G2_df = rbind(G_2_true, result_G2_df)
colnames(result_G2_df) = paste0("est_G2_", c(1:(max_lag - 1)))
delta_result_df = sweep(result_df, 2, g_2_true)
delta_result_G2_df = sweep(result_G2_df, 2, G_2_true)
colnames(delta_result_df) = paste0("delta_", colnames(result_df))
colnames(delta_result_G2_df) = paste0("delta_", colnames(result_G2_df))
result_df$rep_id = as.factor(index_set)
#delta_result_df$rep_id = as.factor(index_set)
# label the true distribution explicitly and keep the factor levels in insertion order
result_G2_df$rep_id = factor(c("True Dist", index_set), levels = c("True Dist", index_set))
#delta_result_G2_df$rep_id = as.factor(index_set)
plot_result_G2_df = melt(result_G2_df, id.vars = 'rep_id')
plot_result_G2 = ggplot(plot_result_G2_df, aes(x = variable, y = value)) +
geom_step(aes(linetype = rep_id, group = rep_id)) +
ylim(0,1) +
theme_bw() +
ggtitle(paste0("Distribution of ", n_reps, " repetitions of the Cross-Covariance Estimator")) +
ylab("G_2")
#plot_delta_result_df = melt(delta_result_df, id.vars = 'rep_id')
#plot_delta_result = ggplot(plot_delta_result_df, aes(x = variable, y = value)) + geom_line(aes(color = rep_id, group = rep_id))
return(list(result_df = result_df
, result_G2_df = result_G2_df
#, delta_result_df = delta_result_df
#, delta_result_G2_df = delta_result_G2_df
, plot_result_G2 = plot_result_G2
#, plot_delta_result = plot_delta_result
))
}
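# Usage sketch (parameters loosely follow the README example; the estimates are
# simulation based, so exact values vary):
#   res <- present_estimates(n_reps = 3, max_lag = 10, p_12 = 0.1, p_21 = 0.7,
#                            n_obs = 1000, burn_in = 1000, lambda_1 = 0.8,
#                            lambda_2 = 0.3, G_1 = 0.2, G_2 = 0.5, progress = FALSE)
#   res$result_G2_df
#   res$plot_result_G2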
#
#
# load("~/Downloads/resultate.zip")
# present <- rbind(firstrun$result_G2_df, c(0.5,pgeom(c(1:9), 0.5), "Null"))
#
# library(ggplot2)
# library(reshape2)
# library(plyr)
# result_df <- firstrun$result_df
# result_G2_df <- rbind(as.numeric(c(0.5,pgeom(c(1:9), 0.5), NA)), firstrun$result_G2_df)
#
# result_G2_df$rep_id[1] <- "True Dist."
#
# max_lag <- 8
# G_2 <- 0.5
# index_set <- c(c(1:3), "True Dist")
#
# g_2_true = dgeom(c(0:(max_lag)), G_2)
# G_2_true = pgeom(c(0:(max_lag)), G_2)
# delta_result_df = sweep(result_df, 2, g_2_true)
# delta_result_G2_df = sweep(result_G2_df, 2, G_2_true)
# colnames(delta_result_df) = paste0("delta_", colnames(result_df))
# colnames(delta_result_G2_df) = paste0("delta_", colnames(result_G2_df))
#
# result_df$rep_id = as.factor(index_set)
# delta_result_df$rep_id = as.factor(index_set)
# result_G2_df$rep_id = as.factor(index_set)
# levels(result_G2_df$rep_id) <- c("True Dist", c(1:3))
# result_G2_df <- cbind(as.numeric(c(0,0,0,0)), result_G2_df)
# colnames(result_G2_df) <- c("est_G2_0", colnames(result_G2_df[2:11]))
# delta_result_G2_df$rep_id = as.factor(index_set)
#
# plot_result_G2_df = melt(result_G2_df, id.vars = 'rep_id')
# plot_result_G2 = ggplot(plot_result_G2_df, aes(x = variable, y = value)) +
# geom_step(aes(linetype = rep_id, group = rep_id)) +
# ylim(0,1) +
# theme_bw() +
# ggtitle("Distribution of 3 repetitions of the Cross-Covariance Estimator") +
# ylab("G_2")
#
# plot_delta_result_df = melt(delta_result_df, id.vars = 'rep_id')
# plot_delta_result = ggplot(plot_delta_result_df, aes(x = variable, y = value)) + geom_line(aes(color = rep_id, group = rep_id))
#
# plot_delta_result
# plot_result_G2
\ No newline at end of file
# Simulations and Documentation
## Description
Simulation and Documentation for the Paper "Nonparametric Estimation of the Service Time Distribution in Discrete-Time Queueing Networks"
## List of Files
### queueing_network.R
Simulation of a two node QN.
### spa_queueingnetwork.RProj
RProject used in RStudio.
### README.org
# Simulations and Documentation
## Description
Simulation and Documentation for the Paper "Nonparametric Estimation of the Service Time Distribution in Discrete-Time Queueing Networks"
## List of Files
### queueing_network.R
Simulation of a two node QN.
### spa_queueingnetwork.RProj
RProject used in RStudio.
### README.org
ORG-File in EMACS, creates README.md.
\ No newline at end of file
@@ -8,3 +8,9 @@ Simulation of a two node QN.
RProject used in RStudio.
*** README.org
ORG-File in EMACS, creates README.md.
* TODOs
* References :ATTACH:
:PROPERTIES:
:Attachments: EdelmannWichelhaus.pdf REVIEWSchweer-Wichelhaus.pdf SchweerWichelhausFINAL.pdf
:ID: adec9048-74c9-4b84-b72c-2df7836d5e4c
:END:
---
title: "README"
author: "Sebastian Schweer"
date: "September 15, 2018"
output: html_document
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
## Live demonstration
Here is a running example to try out:
```{r}
p_12 = 0.1
p_21 = 0.7
n_obs = 1000
burn_in = 5000
lambda_1 = 0.8
lambda_2 = 0.3
G_1 = 0.2
G_2 = 0.5
firstrun <- present_estimates(n_reps = 10, max_lag = 10, p_12 = p_12, p_21 = p_21, n_obs = n_obs, burn_in = burn_in,
lambda_1 = lambda_1, lambda_2 = lambda_2, G_1 = G_1, G_2 = G_2,
progress = TRUE)
firstrun
ggsave("result_G2_plot.png", plot = firstrun$plot_result_G2, device = "png")
ggsave("delta_result_plot.png", plot = firstrun$plot_delta_result, device = "png")
system(paste0("mpack -s 'Skript durchgelaufen mit ", n_obs, " Beobachtungen: Plot G2' result_G2_plot.png [email protected]"))
system(paste0("mpack -s 'Skript durchgelaufen mit ", n_obs, " Beobachtungen: Plot G2' delta_result_plot.png [email protected]"))
```
This yields the following plots:
```{r, echo=FALSE}
firstrun$plot_result_G2
```
As well as
```{r, echo=FALSE}
firstrun$plot_delta_result
```
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cross_covariance_estimator.R
\name{deconvoluting_alpha}
\alias{deconvoluting_alpha}
\title{Deconvolute the Estimator to Obtain Service Time Distribution}
\usage{
deconvoluting_alpha(number_of_steps, alpha_1, alpha_2, p_12, p_21)
}
\arguments{
\item{number_of_steps}{Integer, number of steps in deconvolution.}
\item{alpha_1}{Numeric vector, the first cross-covariance estimator entering Lemma 9 (in estimate_g2: arrivals at node 1 vs. departures at node 1).}
\item{alpha_2}{Numeric vector, estimator "hat" alpha_n^2(...) in Lemma 9 (in estimate_g2: arrivals at node 1 vs. departures at node 2).}
\item{p_12}{Numeric, in [0,1], routing probability from node 1 to node 2}
\item{p_21}{Numeric, in [0,1], routing probability from node 2 to node 1}
}
\value{
Numeric vector, corresponding to "hat" v_n(...) in Lemma 9.
}
\description{
Implementation of the theoretical result of Lemma 9 in Edelmann, Wichelhaus
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate_g2.R
\name{estimate_G2}
\alias{estimate_G2}
\title{Helper Function to Apply Theorem 4 in Edelmann, Wichelhaus}
\usage{
estimate_G2(g2_est)
}
\arguments{
\item{g2_est}{Numeric vector, the pgf estimation of the service
time distribution}
}
\description{
Creates a cdf estimator from a pgf estimator.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/present_estimates.R
\name{present_estimates}
\alias{present_estimates}
\title{Presents the Results of the Cross-Covariance Estimation}
\usage{
present_estimates(n_reps, max_lag, G_2, ...)
}
\arguments{
\item{n_reps}{Integer, number of repetitions of simulation and estimation to
be calculated.}
\item{max_lag}{Integer, length of the estimated service time distribution.}
\item{G_2}{Numeric, in [0,1], parameter of the (Geometric) service time distribution at node 2, used for the theoretical reference distribution.}
\item{...}{Parameters passed on to the function estimate_g2.}
}
\value{
A list: result_df is the data frame containing the raw estimators of g_2 with
a column rep_id for each repetition, result_G2_df contains the corresponding
distribution function estimates (with the true distribution prepended as "True Dist"),
and plot_result_G2 is a ggplot of the estimated distribution functions.
}
\description{
Given a parametrization of a tandem network, this function produces a number
of repetitions of simulation and estimation of the service time distribution G2 at the
second node, and presents the results as data frames and a plot.
}
@@ -4,8 +4,8 @@
\alias{queueing_network_poi_geom}
\title{Simulate a Two-Node Queueing Network}
\usage{
queueing_network_poi_geom(n_obs, burn_in, p_12, p_21, lambda_1, lambda_2, G_1,
G_2)
queueing_network_poi_geom(n_obs, burn_in, p_12, p_21, lambda_1, lambda_2,
G_1, G_2, progress = FALSE)
}
\arguments{
\item{n_obs}{Integer, number of observations to be analyzed}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/queueing_network.R
\name{simulate_single_customer}
\alias{simulate_single_customer}
\title{Simulate the movement of a single customer through the Network}
\usage{
simulate_single_customer(enter_node, p_12, p_21, G_1, G_2)
}
\arguments{
\item{enter_node}{Entry node of the customer, either 1 or 2.}
\item{p_12}{Numeric, in [0,1], routing probability from node 1 to node 2}
\item{p_21}{Numeric, in [0,1], routing probability from node 2 to node 1}
\item{G_1}{Numeric (in [0,1]), parameter steering the (Geometric) service
time distribution at node 1}
\item{G_2}{Numeric (in [0,1]), parameter steering the (Geometric) service
time distribution at node 2}
}
\value{
Numeric vector, consisting of (integer) service times, starting at
the entry node of the customer
}
\description{
For any single customer, this function calculates the service times and
routing pathways for the journey through the network.
}
@@ -24,3 +24,11 @@ test_that("Estimator is 0 if Observations insufficient", {
expect_equal(cross_covariance(data, 1, 2, 2, 1), 0)
expect_equal(cross_covariance(data, 1, 2, 5, NULL), 0)
})
test_that("Estimator is correct", {
expect_equal(cross_covariance(data, 1, 2, 3, 4),1/4*0 - 1/4*1/4*17 +1/4*1/4*1/4*7.8)
expect_equal(cross_covariance(data, 1, 1, 3, 4),1/4*0 - 1/4*0*1/4*4 - 1/4*1/4*0 +1/4*1/4*1*1/4*4)
expect_equal(cross_covariance(data, 1, 1, 2, 4),1/4*0 - 1/4*0*1/4*4 - 1/4*1/4*2 +2/4*1/4*1*1/4*4)
expect_equal(cross_covariance(data, 1, 1, 1, 4),1/4*0 - 1/4*1*1/4*4 - 1/4*1/4*3 +3/4*1/4*1*1/4*4)
expect_equal(cross_covariance(data, 1, 2, 2, 4),1/4*0 - 1/4*1/4*16.8 +2/4*1/4*1/4*7.8)
})
\ No newline at end of file
@@ -25,4 +25,5 @@ test_that("Result is Correct", {
expect_equal(deconvoluting_alpha(2, alpha_1[1:2], alpha_2[1:3], p_12 = 0.5, p_21 = 0.5), c(0,1))
expect_equal(deconvoluting_alpha(2, alpha_1[1:2], alpha_2[1:3], p_12 = 0.7, p_21 = 0.5), c(0,0.1*0.3/(0.5*0.7*0.2)))
expect_equal(deconvoluting_alpha(2, alpha_1[1:2], c(1,1,1), p_12 = 0.5, p_21 = 0.5), c(10,-5))
expect_equal(deconvoluting_alpha(2, c(2,0.3), c(1,1,1), p_12 = 0.5, p_21 = 0.5), c(1,0.85))
})
context("test-simulation_estimation.R")
p_12 = 0.1
p_21 = 0.2
n_obs = 1000
burn_in = 1000
lambda_1 = 1
lambda_2 = 0.3
G_1 = 0.3
G_2 = 0.9
test_that("Function returns a numeric vector", {
expect_equal(class(estimate_g2(10, p_12 = p_12, p_21 = p_21, n_obs = n_obs, burn_in = burn_in,
lambda_1 = lambda_1, lambda_2 = lambda_2, G_1 = G_1, G_2 = G_2,
progress = FALSE)), "numeric")
expect_equal(class(estimate_g2(20, p_12 = p_12, p_21 = p_21, n_obs = n_obs, burn_in = burn_in,
lambda_1 = lambda_1, lambda_2 = lambda_2, G_1 = G_1, G_2 = G_2,
progress = FALSE)), "numeric")
expect_equal(class(estimate_g2(3, p_12 = p_12, p_21 = p_21, n_obs = n_obs, burn_in = burn_in,
lambda_1 = lambda_1, lambda_2 = lambda_2, G_1 = G_1, G_2 = G_2,
progress = FALSE)), "numeric")
})
test_that("Function returns a numeric vector of expected length" , {
expect_equal(length(estimate_g2(10, p_12 = p_12, p_21 = p_21, n_obs = n_obs, burn_in = burn_in,
lambda_1 = lambda_1, lambda_2 = lambda_2, G_1 = G_1, G_2 = G_2,
progress = FALSE)), 9)
expect_equal(length(estimate_g2(20, p_12 = p_12, p_21 = p_21, n_obs = n_obs, burn_in = burn_in,
lambda_1 = lambda_1, lambda_2 = lambda_2, G_1 = G_1, G_2 = G_2,
progress = FALSE)), 19)
expect_equal(length(estimate_g2(3, p_12 = p_12, p_21 = p_21, n_obs = n_obs, burn_in = burn_in,
lambda_1 = lambda_1, lambda_2 = lambda_2, G_1 = G_1, G_2 = G_2,
progress = FALSE)), 2)
})
context("Estimate G2")
test_that("Correct transformations" , {
expect_equal(estimate_G2(c(0.3, 0.2, 0.4, 0.8, 0.1)), c(0.3, 0.5, 0.9, 1, 1))
expect_equal(estimate_G2(c(0.3, 0.2, - 0.4, 0.8, 0.1)), c(0.3, 0.5, 0.5, 1, 1))
expect_equal(estimate_G2(c(4.3, 0.2, 0.4, 0.8, 0.1)), c(1, 1, 1, 1, 1))
})
\ No newline at end of file
@@ -48,20 +48,18 @@ test_that("Number of arriving customers is Poisson distributed", {
expect_equal(sum(qn_3$arrival_2) < 2*(100)*2, TRUE)
})
p_12 = 0.1
p_21 = 0.2
lambda_1 = 3
lambda_2 = 1
G_1 = 0.3
G_2 = 0.2
qn_1 <- queueing_network_poi_geom(10000, 1000, p_12 = p_12, p_21 = p_21,
lambda_1 = lambda_1, lambda_2 = lambda_2,
G_1 = G_1, G_2 = G_2)
test_that("Lemma 1 in Wichelhaus, Edelmann is satisfied", {
p_12 = 0.1
p_21 = 0.2
lambda_1 = 3
lambda_2 = 1
G_1 = 0.3
G_2 = 0.2
qn_1 <- queueing_network_poi_geom(10000, 1000, p_12 = p_12, p_21 = p_21,
lambda_1 = lambda_1, lambda_2 = lambda_2,
G_1 = G_1, G_2 = G_2)
expect_equal(sum(qn_1$departure_1) > 0.5*(1-p_12)/(1-p_12*p_21)*(sum(qn_1$arrival_1) + p_21*sum(qn_1$arrival_2)), TRUE)
expect_equal(sum(qn_1$departure_1) < 1.5*(1-p_12)/(1-p_12*p_21)*(sum(qn_1$arrival_1) + p_21*sum(qn_1$arrival_2)), TRUE)
expect_equal(sum(qn_1$departure_1) > 0.8*(1-p_12)/(1-p_12*p_21)*(sum(qn_1$arrival_1) + p_21*sum(qn_1$arrival_2)), TRUE)
context("test-represent_single_customer.R")
test_that("Output of single customer as expected", {
expect_equal(represent_single_customer(1,c(1,2),100), c(NA, NA, 2, rep(NA,97)))
expect_equal(represent_single_customer(2,c(1,2),100), c(NA, NA, 1, rep(NA,97)))
})
test_that("Max Length defines length of vector", {
expect_equal(length(represent_single_customer(1,c(1,2, 300),95)), 95)
expect_equal(length(represent_single_customer(1,c(1,2),95)), 95)
expect_equal(length(represent_single_customer(1,c(1,2, 300),2)), 2)
expect_equal(length(represent_single_customer(1,c(1),2)), 2)
})
test_that("Exactly one entry is not NA", {
expect_equal(sum(is.na(represent_single_customer(1,c(1,2, 300),95))), 95)
expect_equal(sum(!is.na(represent_single_customer(1,c(1,2, 300),95))), 0)
expect_equal(sum(!is.na(represent_single_customer(1,c(1,1,30),95))), 1)
expect_equal(sum(!is.na(represent_single_customer(2,c(1),95))), 1)
expect_equal(sum(is.na(represent_single_customer(1,c(1,1,2),99))), 98)
})
context("test-represent_single_customer.R")
test_that("Output of single customer as expected", {
expect_equal(represent_single_customer(1,c(1,2),100), c(NA, NA, 2, rep(NA,97)))
expect_equal(represent_single_customer(2,c(1,2),100), c(NA, NA, 1, rep(NA,97)))
expect_equal(represent_single_customer(2,c(1,2,2),100), c(NA, NA, NA, NA, 2, rep(NA,95)))
expect_equal(represent_single_customer(2,c(2,1,2),100), c(NA, NA, NA, NA, 2, rep(NA,95)))
expect_equal(represent_single_customer(2,c(2,2,1),100), c(NA, NA, NA, NA, 2, rep(NA,95)))
expect_equal(represent_single_customer(1,c(2,2,1),100), c(NA, NA, NA, NA, 1, rep(NA,95)))
expect_equal(represent_single_customer(1,c(1,2,2),100), c(NA, NA, NA, NA, 1, rep(NA,95)))
expect_equal(represent_single_customer(1,c(2,1,2),100), c(NA, NA, NA, NA, 1, rep(NA,95)))
})
test_that("Max Length defines length of vector", {
expect_equal(length(represent_single_customer(1,c(1,2, 300),95)), 95)
expect_equal(length(represent_single_customer(1,c(1,2),95)), 95)
expect_equal(length(represent_single_customer(1,c(1,2, 300),2)), 2)
expect_equal(length(represent_single_customer(1,c(1),2)), 2)
})
test_that("Exactly one entry is not NA", {
expect_equal(sum(is.na(represent_single_customer(1,c(1,2, 300),95))), 95)
expect_equal(sum(!is.na(represent_single_customer(1,c(1,2, 300),95))), 0)
expect_equal(sum(!is.na(represent_single_customer(1,c(1,1,30),95))), 1)
expect_equal(sum(!is.na(represent_single_customer(2,c(1),95))), 1)
expect_equal(sum(is.na(represent_single_customer(1,c(1,1,2),99))), 98)
})
context("test-simulate_single_customer.R")
test_that("Result is numeric vector", {
expect_equal(class(simulate_single_customer(1,0.9,0.9,0.3,0.7)), "numeric")
expect_equal(class(simulate_single_customer(2,0.1,0.001,0.3,0.7)), "numeric")
expect_equal(class(simulate_single_customer(2,0.001,0.001,0.3,0.7)), "numeric")
expect_message(simulate_single_customer(1,1,1,0.3,0.7), "Probabilities of geometric distribution out of allowed range")
expect_equal(is.vector(simulate_single_customer(1,0.9,0.9,0.3,0.7)), TRUE)
expect_equal(is.vector(simulate_single_customer(2,0.1,0.001,0.3,0.7)), TRUE)
expect_equal(is.vector(simulate_single_customer(2,0.001,0.001,0.3,0.7)), TRUE)
})
context("test-simulate_single_customer.R")
test_that("Result is numeric vector", {
expect_equal(class(simulate_single_customer(1,0.9,0.9,0.3,0.7)), "numeric")
expect_equal(class(simulate_single_customer(2,0.1,0.001,0.3,0.7)), "numeric")
expect_equal(class(simulate_single_customer(2,0.001,0.001,0.3,0.7)), "numeric")
expect_message(simulate_single_customer(1,1,1,0.3,0.7), "Probabilities of geometric distribution out of allowed range")
expect_equal(is.vector(simulate_single_customer(1,0.9,0.9,0.3,0.7)), TRUE)
expect_equal(is.vector(simulate_single_customer(2,0.1,0.001,0.3,0.7)), TRUE)
expect_equal(is.vector(simulate_single_customer(2,0.001,0.001,0.3,0.7)), TRUE)
})
test_that("Proportion of customers leaving from a certain node corresponds to theory",{
p_12 = 0.3
p_21 = 0.8
expect_equal(mean(replicate(1000, {length(simulate_single_customer(1, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 1})) > (1 - p_12)/(1 - p_12*p_21) - 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(1, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 1})) < (1 - p_12)/(1 - p_12*p_21) + 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(2, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 0})) > (1 - p_12)*p_21/(1 - p_12*p_21) - 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(2, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 0})) < (1 - p_12)*p_21/(1 - p_12*p_21) + 0.1, TRUE)
p_12 = 0.1
p_21 = 0.1
expect_equal(mean(replicate(1000, {length(simulate_single_customer(1, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 1})) > (1 - p_12)/(1 - p_12*p_21) - 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(1, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 1})) < (1 - p_12)/(1 - p_12*p_21) + 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(2, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 0})) > (1 - p_12)*p_21/(1 - p_12*p_21) - 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(2, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 0})) < (1 - p_12)*p_21/(1 - p_12*p_21) + 0.1, TRUE)
p_12 = 0.9
p_21 = 0.8
expect_equal(mean(replicate(1000, {length(simulate_single_customer(1, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 1})) > (1 - p_12)/(1 - p_12*p_21) - 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(1, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 1})) < (1 - p_12)/(1 - p_12*p_21) + 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(2, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 0})) > (1 - p_12)*p_21/(1 - p_12*p_21) - 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(2, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 0})) < (1 - p_12)*p_21/(1 - p_12*p_21) + 0.1, TRUE)
p_12 = 0.78
p_21 = 0.1
expect_equal(mean(replicate(1000, {length(simulate_single_customer(1, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 1})) > (1 - p_12)/(1 - p_12*p_21) - 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(1, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 1})) < (1 - p_12)/(1 - p_12*p_21) + 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(2, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 0})) > (1 - p_12)*p_21/(1 - p_12*p_21) - 0.1, TRUE)
expect_equal(mean(replicate(1000, {length(simulate_single_customer(2, p_12 = p_12, p_21 = p_21, 0.6, 0.8)) %% 2 == 0})) < (1 - p_12)*p_21/(1 - p_12*p_21) + 0.1, TRUE)
})
\ No newline at end of file