diff --git a/.Rbuildignore b/.Rbuildignore new file mode 100644 index 0000000..91114bf --- /dev/null +++ b/.Rbuildignore @@ -0,0 +1,2 @@ +^.*\.Rproj$ +^\.Rproj\.user$ diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..3da7c76 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +.Rproj.user +.Rhistory +.RData +.Ruserdata +src/*.o +src/*.so +src/*.dll +*.cc +*.h +*.Rproj diff --git a/DESCRIPTION b/DESCRIPTION index c1a4fca..7de1a8b 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -16,8 +16,8 @@ Depends: R (>= 3.4.0) Imports: R6, - Rcpp, - RcppParallel, + Rcpp (>= 0.12.0), + RcppParallel (>= 5.0.1), cubelyr, dplyr, ggplot2, @@ -25,12 +25,19 @@ Imports: magrittr, methods, purrr, - rstan, - rstantools, + rstan (>= 2.18.1), + rstantools (>= 2.1.1), scales, tibble, tidyr, - vizdraws + vizdraws (>= 1.1) +LinkingTo: + BH (>= 1.66.0), + Rcpp (>= 0.12.0), + RcppEigen (>= 0.3.3.3.0), + RcppParallel (>= 5.0.1), + rstan (>= 2.18.1), + StanHeaders (>= 2.18.0) Suggests: knitr, rmarkdown, diff --git a/Docker/build_test/.gitignore b/Docker/build_test/.gitignore new file mode 100644 index 0000000..335ec95 --- /dev/null +++ b/Docker/build_test/.gitignore @@ -0,0 +1 @@ +*.tar.gz diff --git a/Docker/build_test/Dockerfile b/Docker/build_test/Dockerfile new file mode 100644 index 0000000..01c5e85 --- /dev/null +++ b/Docker/build_test/Dockerfile @@ -0,0 +1,41 @@ +FROM rocker/r-base:latest +LABEL maintainer="Ignacio Martinez " + +RUN apt-get update -qq \ + && apt-get install -y \ + libxml2-dev \ + libssl-dev \ + libcurl4-openssl-dev \ + libv8-dev \ + texlive-latex-extra \ + libharfbuzz-dev \ + libfribidi-dev \ + libpng-dev \ + libtiff5-dev \ + libjpeg-dev \ + libpq5 \ + pandoc \ + && install.r \ + magrittr \ + R6 \ + Rcpp \ + cubelyr \ + dplyr \ + ggplot2 \ + glue \ + methods \ + purrr \ + rstan \ + rstantools \ + scales \ + tibble \ + tidyr \ + vizdraws \ + devtools \ + BH \ + RcppEigen \ + rmarkdown \ + knitr \ + roxygen2 \ + gsynth + diff --git a/Docker/build_test/build.sh b/Docker/build_test/build.sh new file mode 100755 index 0000000..10a5b22 --- /dev/null +++ b/Docker/build_test/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +docker build --tag bsynth . 
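(Aside, not part of build.sh: the image built above can also be used to run a full package check before the build step. The sketch below is illustrative only; it reuses the same bind-mount layout as the docker run call that follows, and devtools::check() with error_on is standard devtools, but this exact invocation is an assumption, not taken from this PR.)

docker run --rm \
  -v $(pwd)/../..:/tmp/bsynth \
  -w /tmp/bsynth \
  bsynth \
  R -e "devtools::check(pkg = '/tmp/bsynth', error_on = 'error')"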
+ +docker run --rm \ + -v $(pwd):/tmp/working_dir \ + -v $(pwd)/../..:/tmp/bsynth \ + -w /tmp/working_dir \ + bsynth \ + R -e "devtools::test(pkg = '/tmp/bsynth'); devtools::build(pkg = '/tmp/bsynth', path = '/tmp/working_dir', binary = TRUE)" + + diff --git a/NAMESPACE b/NAMESPACE index ecc4cfd..ec07c5d 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -7,3 +7,4 @@ import(Rcpp) import(methods) importFrom(magrittr,"%>%") importFrom(rstan,sampling) +useDynLib(bsynth, .registration = TRUE) diff --git a/R/bfactor.R b/R/bfactor.R index bbe8db7..3325638 100644 --- a/R/bfactor.R +++ b/R/bfactor.R @@ -153,9 +153,9 @@ bayesianFactor <- R6::R6Class( # dplyr::select(-{{id}}) %>% # any(is.na())) - private$stan_model <- bsynth.models::factor_model_with_covariates + private$stan_model <- stanmodels$factor_model_with_covariates } else { - private$stan_model <- bsynth.models::factor_model_without_covariates + private$stan_model <- stanmodels$factor_model_without_covariates } private$data <- data %>% diff --git a/R/factory.R b/R/factory.R index fce0a72..a5ba943 100644 --- a/R/factory.R +++ b/R/factory.R @@ -248,19 +248,19 @@ bayesianSynth <- R6::R6Class( if (length(private$treated_ids) == 1) { if (is.null(covariates)) { if (gp) { - private$stan_model <- bsynth.models::model2 # stanmodels$model2 + private$stan_model <- stanmodels$model2 } else { if (predictor_match) { - private$stan_model <- bsynth.models::model1_gammaOmega # gamma BCS + private$stan_model <- stanmodels$model1_gammaOmega } else { - private$stan_model <- bsynth.models::model1 # stanmodels$model1 + private$stan_model <- stanmodels$model1 } } } else { if (gp) { - private$stan_model <- bsynth.models::model4 # stanmodels$model4 + private$stan_model <- stanmodels$model4 } else { - private$stan_model <- bsynth.models::model3 # stanmodels$model3 + private$stan_model <- stanmodels$model3 } private$covariates <- covariates %>% dplyr::arrange(!!private$time) @@ -268,15 +268,15 @@ } else { if (is.null(covariates)) { if (gp) { - private$stan_model <- bsynth.models::model8 # stanmodels$model8 + private$stan_model <- stanmodels$model8 } else { - private$stan_model <- bsynth.models::model5 # stanmodels$model5 + private$stan_model <- stanmodels$model5 } } else { if (gp) { - private$stan_model <- bsynth.models::model7 # stanmodels$model7 + private$stan_model <- stanmodels$model7 } else { - private$stan_model <- bsynth.models::model6 # stanmodels$model6 + private$stan_model <- stanmodels$model6 } private$covariates <- covariates %>% dplyr::arrange(!!private$time) diff --git a/R/stanmodels.R b/R/stanmodels.R new file mode 100644 index 0000000..7357bd0 --- /dev/null +++ b/R/stanmodels.R @@ -0,0 +1,36 @@ +# Generated by rstantools. Do not edit by hand. 
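(Aside, not generated by rstantools: the R/bfactor.R and R/factory.R hunks above switch model lookup from the external bsynth.models package to this package-internal stanmodels list, so each entry is a compiled stanmodel object that can be passed directly to rstan::sampling(). A minimal sketch follows, assuming a data list named stan_data already shaped for model1; the data object and sampler settings are illustrative assumptions, not part of this diff.)

fit <- rstan::sampling(
  stanmodels$model1,
  data = stan_data,   # hypothetical data list, not defined in this PR
  chains = 4,
  iter = 2000
)
print(fit)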
+ +# names of stan models +stanmodels <- c("factor_functions", "factor_model_with_covariates", "factor_model_without_covariates", "model1_gammaOmega", "model1", "model2", "model3", "model4", "model5", "model6", "model7", "model8") + +# load each stan module +Rcpp::loadModule("stan_fit4factor_functions_mod", what = TRUE) +Rcpp::loadModule("stan_fit4factor_model_with_covariates_mod", what = TRUE) +Rcpp::loadModule("stan_fit4factor_model_without_covariates_mod", what = TRUE) +Rcpp::loadModule("stan_fit4model1_gammaOmega_mod", what = TRUE) +Rcpp::loadModule("stan_fit4model1_mod", what = TRUE) +Rcpp::loadModule("stan_fit4model2_mod", what = TRUE) +Rcpp::loadModule("stan_fit4model3_mod", what = TRUE) +Rcpp::loadModule("stan_fit4model4_mod", what = TRUE) +Rcpp::loadModule("stan_fit4model5_mod", what = TRUE) +Rcpp::loadModule("stan_fit4model6_mod", what = TRUE) +Rcpp::loadModule("stan_fit4model7_mod", what = TRUE) +Rcpp::loadModule("stan_fit4model8_mod", what = TRUE) + +# instantiate each stanmodel object +stanmodels <- sapply(stanmodels, function(model_name) { + # create C++ code for stan model + stan_file <- if(dir.exists("stan")) "stan" else file.path("inst", "stan") + stan_file <- file.path(stan_file, paste0(model_name, ".stan")) + stanfit <- rstan::stanc_builder(stan_file, + allow_undefined = TRUE, + obfuscate_model_name = FALSE) + stanfit$model_cpp <- list(model_cppname = stanfit$model_name, + model_cppcode = stanfit$cppcode) + # create stanmodel object + methods::new(Class = "stanmodel", + model_name = stanfit$model_name, + model_code = stanfit$model_code, + model_cpp = stanfit$model_cpp, + mk_cppmodule = function(x) get(paste0("model_", model_name))) +}) diff --git a/inst/include/stan_meta_header.hpp b/inst/include/stan_meta_header.hpp new file mode 100644 index 0000000..3b914da --- /dev/null +++ b/inst/include/stan_meta_header.hpp @@ -0,0 +1 @@ +// Insert all #include statements here diff --git a/src/Makevars b/src/Makevars new file mode 100644 index 0000000..bcc8a7c --- /dev/null +++ b/src/Makevars @@ -0,0 +1,9 @@ +# Generated by rstantools. Do not edit by hand. + +STANHEADERS_SRC = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "message()" -e "cat(system.file('include', 'src', package = 'StanHeaders', mustWork = TRUE))" -e "message()" | grep "StanHeaders") + +PKG_CPPFLAGS = -I"../inst/include" -I"$(STANHEADERS_SRC)" -DBOOST_DISABLE_ASSERTS -DEIGEN_NO_DEBUG -DBOOST_MATH_OVERFLOW_ERROR_POLICY=errno_on_error +PKG_CXXFLAGS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::CxxFlags()") $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::CxxFlags()") +PKG_LIBS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::RcppParallelLibs()") $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::LdFlags()") + +CXX_STD = CXX14 diff --git a/src/Makevars.win b/src/Makevars.win new file mode 100644 index 0000000..60f0bdd --- /dev/null +++ b/src/Makevars.win @@ -0,0 +1,9 @@ +# Generated by rstantools. Do not edit by hand. 
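(Aside, not part of Makevars.win: the STANHEADERS_SRC, PKG_CXXFLAGS, and PKG_LIBS values above are obtained by shelling out to Rscript, so the same values can be inspected from an interactive R session when debugging a build. The calls below are exactly the ones the Makevars files use; running them interactively as a sanity check is the only assumption here.)

# include path substituted into STANHEADERS_SRC
cat(system.file("include", "src", package = "StanHeaders", mustWork = TRUE))
# compiler and linker flags pulled in by PKG_CXXFLAGS / PKG_LIBS
RcppParallel::CxxFlags()
StanHeaders:::CxxFlags()
RcppParallel::RcppParallelLibs()
StanHeaders:::LdFlags()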
+ +STANHEADERS_SRC = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "message()" -e "cat(system.file('include', 'src', package = 'StanHeaders', mustWork = TRUE))" -e "message()" | grep "StanHeaders") + +PKG_CPPFLAGS = -I"../inst/include" -I"$(STANHEADERS_SRC)" -DBOOST_DISABLE_ASSERTS -DEIGEN_NO_DEBUG -DRCPP_PARALLEL_USE_TBB=1 +PKG_CXXFLAGS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::CxxFlags()") $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::CxxFlags()") +PKG_LIBS = $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "RcppParallel::RcppParallelLibs()") $(shell "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "StanHeaders:::LdFlags()") + +CXX_STD = CXX14 diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp new file mode 100644 index 0000000..2eda56f --- /dev/null +++ b/src/RcppExports.cpp @@ -0,0 +1,47 @@ +// Generated by using Rcpp::compileAttributes() -> do not edit by hand +// Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 + +#include +#include + +using namespace Rcpp; + +#ifdef RCPP_USE_GLOBAL_ROSTREAM +Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); +Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); +#endif + + +RcppExport SEXP _rcpp_module_boot_stan_fit4factor_functions_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4factor_model_with_covariates_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4factor_model_without_covariates_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4model1_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4model1_gammaOmega_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4model2_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4model3_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4model4_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4model5_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4model6_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4model7_mod(); +RcppExport SEXP _rcpp_module_boot_stan_fit4model8_mod(); + +static const R_CallMethodDef CallEntries[] = { + {"_rcpp_module_boot_stan_fit4factor_functions_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4factor_functions_mod, 0}, + {"_rcpp_module_boot_stan_fit4factor_model_with_covariates_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4factor_model_with_covariates_mod, 0}, + {"_rcpp_module_boot_stan_fit4factor_model_without_covariates_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4factor_model_without_covariates_mod, 0}, + {"_rcpp_module_boot_stan_fit4model1_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4model1_mod, 0}, + {"_rcpp_module_boot_stan_fit4model1_gammaOmega_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4model1_gammaOmega_mod, 0}, + {"_rcpp_module_boot_stan_fit4model2_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4model2_mod, 0}, + {"_rcpp_module_boot_stan_fit4model3_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4model3_mod, 0}, + {"_rcpp_module_boot_stan_fit4model4_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4model4_mod, 0}, + {"_rcpp_module_boot_stan_fit4model5_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4model5_mod, 0}, + {"_rcpp_module_boot_stan_fit4model6_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4model6_mod, 0}, + {"_rcpp_module_boot_stan_fit4model7_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4model7_mod, 0}, + {"_rcpp_module_boot_stan_fit4model8_mod", (DL_FUNC) &_rcpp_module_boot_stan_fit4model8_mod, 0}, + {NULL, NULL, 0} +}; + +RcppExport void R_init_bsynth(DllInfo *dll) { + R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); + R_useDynamicSymbols(dll, FALSE); +} diff --git a/src/stanExports_factor_functions.cc b/src/stanExports_factor_functions.cc 
new file mode 100644 index 0000000..57fc903 --- /dev/null +++ b/src/stanExports_factor_functions.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_factor_functions.h" + +RCPP_MODULE(stan_fit4factor_functions_mod) { + + + class_ >("model_factor_functions") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_factor_functions.h b/src/stanExports_factor_functions.h new file mode 100644 index 0000000..fcea13a --- /dev/null +++ b/src/stanExports_factor_functions.h @@ -0,0 +1,361 @@ +// Generated by rstantools. Do not edit by hand. + +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_factor_functions_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_factor_functions"); + reader.add_event(74, 72, "end", "model_factor_functions"); + return reader; +} +template +Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> +make_F(const int& T, + const Eigen::Matrix& diagonal_loadings, + const Eigen::Matrix& lower_tri_loadings, std::ostream* pstream__) { + typedef typename boost::math::tools::promote_args::type local_scalar_t__; + typedef local_scalar_t__ fun_return_scalar_t__; + const static bool propto__ = true; + (void) propto__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + int current_statement_begin__ = -1; + try { + { + current_statement_begin__ = 27; + int L(0); + (void) L; // dummy to suppress unused var warning + stan::math::fill(L, std::numeric_limits::min()); + stan::math::assign(L,num_elements(diagonal_loadings)); + current_statement_begin__ = 28; + int M(0); + (void) M; // dummy to suppress unused var warning + stan::math::fill(M, std::numeric_limits::min()); + stan::math::assign(M,num_elements(lower_tri_loadings)); + current_statement_begin__ = 29; + validate_non_negative_index("F", "T", T); + validate_non_negative_index("F", "L", L); + Eigen::Matrix F(T, L); + stan::math::initialize(F, DUMMY_VAR__); + stan::math::fill(F, DUMMY_VAR__); + current_statement_begin__ = 31; + int idx(0); + (void) idx; // dummy 
to suppress unused var warning + stan::math::fill(idx, std::numeric_limits::min()); + stan::math::assign(idx,0); + current_statement_begin__ = 33; + for (int j = 1; j <= L; ++j) { + current_statement_begin__ = 34; + stan::model::assign(F, + stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + get_base1(diagonal_loadings, j, "diagonal_loadings", 1), + "assigning variable F"); + current_statement_begin__ = 35; + for (int i = (j + 1); i <= T; ++i) { + current_statement_begin__ = 36; + stan::math::assign(idx, (idx + 1)); + current_statement_begin__ = 37; + stan::model::assign(F, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + get_base1(lower_tri_loadings, idx, "lower_tri_loadings", 1), + "assigning variable F"); + } + } + current_statement_begin__ = 40; + for (int j = 1; j <= (L - 1); ++j) { + current_statement_begin__ = 41; + for (int i = (j + 1); i <= L; ++i) { + current_statement_begin__ = 41; + stan::model::assign(F, + stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list())), + 0, + "assigning variable F"); + } + } + current_statement_begin__ = 44; + return stan::math::promote_scalar(F); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } +} +struct make_F_functor__ { + template + Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> + operator()(const int& T, + const Eigen::Matrix& diagonal_loadings, + const Eigen::Matrix& lower_tri_loadings, std::ostream* pstream__) const { + return make_F(T, diagonal_loadings, lower_tri_loadings, pstream__); + } +}; +template +Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> +make_beta(const int& J, + const Eigen::Matrix& off, + const Eigen::Matrix& lambda, + const T3__& eta, + const Eigen::Matrix& tau, std::ostream* pstream__) { + typedef typename boost::math::tools::promote_args::type local_scalar_t__; + typedef local_scalar_t__ fun_return_scalar_t__; + const static bool propto__ = true; + (void) propto__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + int current_statement_begin__ = -1; + try { + { + current_statement_begin__ = 60; + int L(0); + (void) L; // dummy to suppress unused var warning + stan::math::fill(L, std::numeric_limits::min()); + stan::math::assign(L,cols(off)); + current_statement_begin__ = 61; + validate_non_negative_index("cache", "L", L); + Eigen::Matrix cache(L); + stan::math::initialize(cache, DUMMY_VAR__); + stan::math::fill(cache, DUMMY_VAR__); + stan::math::assign(cache,multiply(stan::math::tan(multiply((0.5 * stan::math::pi()), lambda)), stan::math::tan(((0.5 * stan::math::pi()) * eta)))); + current_statement_begin__ = 64; + validate_non_negative_index("tau_", "J", J); + Eigen::Matrix tau_(J); + stan::math::initialize(tau_, DUMMY_VAR__); + stan::math::fill(tau_, DUMMY_VAR__); + stan::math::assign(tau_,stan::math::tan(multiply((0.5 * stan::math::pi()), tau))); + current_statement_begin__ = 65; + validate_non_negative_index("out", "J", J); + validate_non_negative_index("out", "L", L); + Eigen::Matrix out(J, L); + stan::math::initialize(out, DUMMY_VAR__); + stan::math::fill(out, DUMMY_VAR__); + 
current_statement_begin__ = 67; + for (int j = 1; j <= J; ++j) { + current_statement_begin__ = 68; + stan::model::assign(out, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + multiply(get_base1(off, j, "off", 1), get_base1(tau_, j, "tau_", 1)), + "assigning variable out"); + } + current_statement_begin__ = 70; + return stan::math::promote_scalar(diag_pre_multiply(cache, transpose(out))); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } +} +struct make_beta_functor__ { + template + Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> + operator()(const int& J, + const Eigen::Matrix& off, + const Eigen::Matrix& lambda, + const T3__& eta, + const Eigen::Matrix& tau, std::ostream* pstream__) const { + return make_beta(J, off, lambda, eta, tau, pstream__); + } +}; +#include +class model_factor_functions + : public stan::model::model_base_crtp { +private: +public: + model_factor_functions(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_factor_functions(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_factor_functions_namespace::model_factor_functions"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + // initialize transformed data variables + // execute transformed data statements + // validate transformed data + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_factor_functions() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ 
log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + // model body + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_factor_functions_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + if (!include_gqs__ && !include_tparams__) return; + if (!include_gqs__) return; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_factor_functions"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + } + if (!include_gqs__) return; + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { 
+ } + if (!include_gqs__) return; + } +}; // model +} // namespace +typedef model_factor_functions_namespace::model_factor_functions stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_factor_model_with_covariates.cc b/src/stanExports_factor_model_with_covariates.cc new file mode 100644 index 0000000..ee384bb --- /dev/null +++ b/src/stanExports_factor_model_with_covariates.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_factor_model_with_covariates.h" + +RCPP_MODULE(stan_fit4factor_model_with_covariates_mod) { + + + class_ >("model_factor_model_with_covariates") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_factor_model_with_covariates.h b/src/stanExports_factor_model_with_covariates.h new file mode 100644 index 0000000..d52d87b --- /dev/null +++ b/src/stanExports_factor_model_with_covariates.h @@ -0,0 +1,1321 @@ +// Generated by rstantools. Do not edit by hand. 
+ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_factor_model_with_covariates_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_factor_model_with_covariates"); + reader.add_event(24, 24, "include", "factor_functions.stan"); + reader.add_event(24, 0, "start", "factor_functions.stan"); + reader.add_event(96, 72, "end", "factor_functions.stan"); + reader.add_event(96, 25, "restart", "model_factor_model_with_covariates"); + reader.add_event(211, 138, "end", "model_factor_model_with_covariates"); + return reader; +} +template +Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> +make_F(const int& T, + const Eigen::Matrix& diagonal_loadings, + const Eigen::Matrix& lower_tri_loadings, std::ostream* pstream__) { + typedef typename boost::math::tools::promote_args::type local_scalar_t__; + typedef local_scalar_t__ fun_return_scalar_t__; + const static bool propto__ = true; + (void) propto__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + int current_statement_begin__ = -1; + try { + { + current_statement_begin__ = 51; + int L(0); + (void) L; // dummy to suppress unused var warning + stan::math::fill(L, std::numeric_limits::min()); + stan::math::assign(L,num_elements(diagonal_loadings)); + current_statement_begin__ = 52; + int M(0); + (void) M; // dummy to suppress unused var warning + stan::math::fill(M, std::numeric_limits::min()); + stan::math::assign(M,num_elements(lower_tri_loadings)); + current_statement_begin__ = 53; + validate_non_negative_index("F", "T", T); + validate_non_negative_index("F", "L", L); + Eigen::Matrix F(T, L); + stan::math::initialize(F, DUMMY_VAR__); + stan::math::fill(F, DUMMY_VAR__); + current_statement_begin__ = 55; + int idx(0); + (void) idx; // dummy to suppress unused var warning + stan::math::fill(idx, std::numeric_limits::min()); + stan::math::assign(idx,0); + current_statement_begin__ = 57; + for (int j = 1; j <= L; ++j) { + current_statement_begin__ = 58; + stan::model::assign(F, + stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + get_base1(diagonal_loadings, j, "diagonal_loadings", 1), + "assigning variable F"); + current_statement_begin__ = 59; + for (int i = (j + 1); i <= T; ++i) { + current_statement_begin__ = 60; + stan::math::assign(idx, (idx + 1)); + current_statement_begin__ = 61; + stan::model::assign(F, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + get_base1(lower_tri_loadings, idx, "lower_tri_loadings", 1), + "assigning variable F"); + } + } + current_statement_begin__ = 64; + for (int j = 1; j <= (L - 1); ++j) { + current_statement_begin__ = 65; + for (int i = (j + 1); i <= L; ++i) { + current_statement_begin__ = 65; + stan::model::assign(F, + stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list())), + 0, + "assigning variable F"); + } + } + current_statement_begin__ = 68; + return stan::math::promote_scalar(F); + 
} + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } +} +struct make_F_functor__ { + template + Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> + operator()(const int& T, + const Eigen::Matrix& diagonal_loadings, + const Eigen::Matrix& lower_tri_loadings, std::ostream* pstream__) const { + return make_F(T, diagonal_loadings, lower_tri_loadings, pstream__); + } +}; +template +Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> +make_beta(const int& J, + const Eigen::Matrix& off, + const Eigen::Matrix& lambda, + const T3__& eta, + const Eigen::Matrix& tau, std::ostream* pstream__) { + typedef typename boost::math::tools::promote_args::type local_scalar_t__; + typedef local_scalar_t__ fun_return_scalar_t__; + const static bool propto__ = true; + (void) propto__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + int current_statement_begin__ = -1; + try { + { + current_statement_begin__ = 84; + int L(0); + (void) L; // dummy to suppress unused var warning + stan::math::fill(L, std::numeric_limits::min()); + stan::math::assign(L,cols(off)); + current_statement_begin__ = 85; + validate_non_negative_index("cache", "L", L); + Eigen::Matrix cache(L); + stan::math::initialize(cache, DUMMY_VAR__); + stan::math::fill(cache, DUMMY_VAR__); + stan::math::assign(cache,multiply(stan::math::tan(multiply((0.5 * stan::math::pi()), lambda)), stan::math::tan(((0.5 * stan::math::pi()) * eta)))); + current_statement_begin__ = 88; + validate_non_negative_index("tau_", "J", J); + Eigen::Matrix tau_(J); + stan::math::initialize(tau_, DUMMY_VAR__); + stan::math::fill(tau_, DUMMY_VAR__); + stan::math::assign(tau_,stan::math::tan(multiply((0.5 * stan::math::pi()), tau))); + current_statement_begin__ = 89; + validate_non_negative_index("out", "J", J); + validate_non_negative_index("out", "L", L); + Eigen::Matrix out(J, L); + stan::math::initialize(out, DUMMY_VAR__); + stan::math::fill(out, DUMMY_VAR__); + current_statement_begin__ = 91; + for (int j = 1; j <= J; ++j) { + current_statement_begin__ = 92; + stan::model::assign(out, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + multiply(get_base1(off, j, "off", 1), get_base1(tau_, j, "tau_", 1)), + "assigning variable out"); + } + current_statement_begin__ = 94; + return stan::math::promote_scalar(diag_pre_multiply(cache, transpose(out))); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } +} +struct make_beta_functor__ { + template + Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> + operator()(const int& J, + const Eigen::Matrix& off, + const Eigen::Matrix& lambda, + const T3__& eta, + const Eigen::Matrix& tau, std::ostream* pstream__) const { + return make_beta(J, off, lambda, eta, tau, pstream__); + } +}; +#include +class model_factor_model_with_covariates + : public stan::model::model_base_crtp { +private: + int L; + int N; + row_vector_d y_treated_pre; + int J; + std::vector y_donors_pre; + int N_pred; + std::vector y_donors_post; + int T; + int j_plus_1; + int M; + row_vector_d j_ones; + vector_d t_ones; + std::vector y_donors; + std::vector 
y_donors_pre_std; + std::vector y_donors_post_std; + vector_d mean_y_donors_pre; + vector_d sd_y_donors_pre; + double mean_y; + double sd_y; + row_vector_d y_std; +public: + model_factor_model_with_covariates(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_factor_model_with_covariates(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_factor_model_with_covariates_namespace::model_factor_model_with_covariates"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 99; + context__.validate_dims("data initialization", "L", "int", context__.to_vec()); + L = int(0); + vals_i__ = context__.vals_i("L"); + pos__ = 0; + L = vals_i__[pos__++]; + check_greater_or_equal(function__, "L", L, 2); + current_statement_begin__ = 100; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 101; + validate_non_negative_index("y_treated_pre", "N", N); + context__.validate_dims("data initialization", "y_treated_pre", "row_vector_d", context__.to_vec(N)); + y_treated_pre = Eigen::Matrix(N); + vals_r__ = context__.vals_r("y_treated_pre"); + pos__ = 0; + size_t y_treated_pre_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_treated_pre_j_1_max__; ++j_1__) { + y_treated_pre(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 103; + context__.validate_dims("data initialization", "J", "int", context__.to_vec()); + J = int(0); + vals_i__ = context__.vals_i("J"); + pos__ = 0; + J = vals_i__[pos__++]; + check_greater_or_equal(function__, "J", J, 0); + current_statement_begin__ = 104; + validate_non_negative_index("y_donors_pre", "N", N); + validate_non_negative_index("y_donors_pre", "J", J); + context__.validate_dims("data initialization", "y_donors_pre", "row_vector_d", context__.to_vec(J,N)); + y_donors_pre = std::vector >(J, Eigen::Matrix(N)); + vals_r__ = context__.vals_r("y_donors_pre"); + pos__ = 0; + size_t y_donors_pre_j_1_max__ = N; + size_t y_donors_pre_k_0_max__ = J; + for (size_t j_1__ = 0; j_1__ < y_donors_pre_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_donors_pre_k_0_max__; ++k_0__) { + y_donors_pre[k_0__](j_1__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 106; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = context__.vals_i("N_pred"); + pos__ = 0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 107; + validate_non_negative_index("y_donors_post", 
"N_pred", N_pred); + validate_non_negative_index("y_donors_post", "J", J); + context__.validate_dims("data initialization", "y_donors_post", "row_vector_d", context__.to_vec(J,N_pred)); + y_donors_post = std::vector >(J, Eigen::Matrix(N_pred)); + vals_r__ = context__.vals_r("y_donors_post"); + pos__ = 0; + size_t y_donors_post_j_1_max__ = N_pred; + size_t y_donors_post_k_0_max__ = J; + for (size_t j_1__ = 0; j_1__ < y_donors_post_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_donors_post_k_0_max__; ++k_0__) { + y_donors_post[k_0__](j_1__) = vals_r__[pos__++]; + } + } + // initialize transformed data variables + current_statement_begin__ = 112; + T = int(0); + stan::math::fill(T, std::numeric_limits::min()); + stan::math::assign(T,(N + N_pred)); + current_statement_begin__ = 113; + j_plus_1 = int(0); + stan::math::fill(j_plus_1, std::numeric_limits::min()); + stan::math::assign(j_plus_1,(J + 1)); + current_statement_begin__ = 114; + M = int(0); + stan::math::fill(M, std::numeric_limits::min()); + stan::math::assign(M,((L * (T - L)) + divide((L * (L - 1)), 2))); + current_statement_begin__ = 115; + validate_non_negative_index("j_ones", "j_plus_1", j_plus_1); + j_ones = Eigen::Matrix(j_plus_1); + stan::math::fill(j_ones, DUMMY_VAR__); + stan::math::assign(j_ones,rep_row_vector(1, j_plus_1)); + current_statement_begin__ = 116; + validate_non_negative_index("t_ones", "T", T); + t_ones = Eigen::Matrix(T); + stan::math::fill(t_ones, DUMMY_VAR__); + stan::math::assign(t_ones,rep_vector(1.0, T)); + current_statement_begin__ = 118; + validate_non_negative_index("y_donors", "T", T); + validate_non_negative_index("y_donors", "J", J); + y_donors = std::vector >(J, Eigen::Matrix(T)); + stan::math::fill(y_donors, DUMMY_VAR__); + current_statement_begin__ = 121; + validate_non_negative_index("y_donors_pre_std", "N", N); + validate_non_negative_index("y_donors_pre_std", "J", J); + y_donors_pre_std = std::vector >(J, Eigen::Matrix(N)); + stan::math::fill(y_donors_pre_std, DUMMY_VAR__); + current_statement_begin__ = 122; + validate_non_negative_index("y_donors_post_std", "N_pred", N_pred); + validate_non_negative_index("y_donors_post_std", "J", J); + y_donors_post_std = std::vector >(J, Eigen::Matrix(N_pred)); + stan::math::fill(y_donors_post_std, DUMMY_VAR__); + current_statement_begin__ = 123; + validate_non_negative_index("mean_y_donors_pre", "J", J); + mean_y_donors_pre = Eigen::Matrix(J); + stan::math::fill(mean_y_donors_pre, DUMMY_VAR__); + current_statement_begin__ = 124; + validate_non_negative_index("sd_y_donors_pre", "J", J); + sd_y_donors_pre = Eigen::Matrix(J); + stan::math::fill(sd_y_donors_pre, DUMMY_VAR__); + current_statement_begin__ = 125; + mean_y = double(0); + stan::math::fill(mean_y, DUMMY_VAR__); + stan::math::assign(mean_y,mean(y_treated_pre)); + current_statement_begin__ = 126; + sd_y = double(0); + stan::math::fill(sd_y, DUMMY_VAR__); + stan::math::assign(sd_y,sd(y_treated_pre)); + current_statement_begin__ = 127; + validate_non_negative_index("y_std", "N", N); + y_std = Eigen::Matrix(N); + stan::math::fill(y_std, DUMMY_VAR__); + stan::math::assign(y_std,divide(subtract(y_treated_pre, mean_y), sd_y)); + // execute transformed data statements + current_statement_begin__ = 129; + for (int j = 1; j <= J; ++j) { + current_statement_begin__ = 130; + stan::model::assign(mean_y_donors_pre, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + mean(get_base1(y_donors_pre, j, "y_donors_pre", 1)), + "assigning variable mean_y_donors_pre"); + 
current_statement_begin__ = 131; + stan::model::assign(sd_y_donors_pre, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + sd(get_base1(y_donors_pre, j, "y_donors_pre", 1)), + "assigning variable sd_y_donors_pre"); + current_statement_begin__ = 132; + stan::model::assign(y_donors_pre_std, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + divide(subtract(get_base1(y_donors_pre, j, "y_donors_pre", 1), get_base1(mean_y_donors_pre, j, "mean_y_donors_pre", 1)), get_base1(sd_y_donors_pre, j, "sd_y_donors_pre", 1)), + "assigning variable y_donors_pre_std"); + current_statement_begin__ = 134; + stan::model::assign(y_donors_post_std, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + divide(subtract(get_base1(y_donors_post, j, "y_donors_post", 1), get_base1(mean_y_donors_pre, j, "mean_y_donors_pre", 1)), get_base1(sd_y_donors_pre, j, "sd_y_donors_pre", 1)), + "assigning variable y_donors_post_std"); + current_statement_begin__ = 136; + stan::model::assign(y_donors, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + append_col(get_base1(y_donors_pre_std, j, "y_donors_pre_std", 1), get_base1(y_donors_post_std, j, "y_donors_post_std", 1)), + "assigning variable y_donors"); + } + // validate transformed data + current_statement_begin__ = 114; + check_greater_or_equal(function__, "M", M, 1); + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 142; + validate_non_negative_index("raw_b", "T", T); + num_params_r__ += T; + current_statement_begin__ = 143; + num_params_r__ += 1; + current_statement_begin__ = 144; + validate_non_negative_index("raw_c", "j_plus_1", j_plus_1); + num_params_r__ += j_plus_1; + current_statement_begin__ = 145; + num_params_r__ += 1; + current_statement_begin__ = 147; + validate_non_negative_index("beta_off", "j_plus_1", j_plus_1); + validate_non_negative_index("beta_off", "L", L); + num_params_r__ += (j_plus_1 * L); + current_statement_begin__ = 148; + validate_non_negative_index("lambda", "L", L); + num_params_r__ += L; + current_statement_begin__ = 149; + num_params_r__ += 1; + current_statement_begin__ = 150; + validate_non_negative_index("tau", "j_plus_1", j_plus_1); + num_params_r__ += j_plus_1; + current_statement_begin__ = 152; + validate_non_negative_index("y_missing", "N_pred", N_pred); + num_params_r__ += N_pred; + current_statement_begin__ = 154; + num_params_r__ += 1; + current_statement_begin__ = 156; + validate_non_negative_index("F_diag", "L", L); + num_params_r__ += L; + current_statement_begin__ = 157; + validate_non_negative_index("F_lower", "M", M); + num_params_r__ += M; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_factor_model_with_covariates() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 142; + if (!(context__.contains_r("raw_b"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable 
raw_b missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("raw_b"); + pos__ = 0U; + validate_non_negative_index("raw_b", "T", T); + context__.validate_dims("parameter initialization", "raw_b", "vector_d", context__.to_vec(T)); + Eigen::Matrix raw_b(T); + size_t raw_b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < raw_b_j_1_max__; ++j_1__) { + raw_b(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(raw_b); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable raw_b: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 143; + if (!(context__.contains_r("sigma_b"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma_b missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma_b"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma_b", "double", context__.to_vec()); + double sigma_b(0); + sigma_b = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma_b); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma_b: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 144; + if (!(context__.contains_r("raw_c"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable raw_c missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("raw_c"); + pos__ = 0U; + validate_non_negative_index("raw_c", "j_plus_1", j_plus_1); + context__.validate_dims("parameter initialization", "raw_c", "row_vector_d", context__.to_vec(j_plus_1)); + Eigen::Matrix raw_c(j_plus_1); + size_t raw_c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < raw_c_j_1_max__; ++j_1__) { + raw_c(j_1__) = vals_r__[pos__++]; + } + try { + writer__.row_vector_unconstrain(raw_c); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable raw_c: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 145; + if (!(context__.contains_r("sigma_c"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma_c missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma_c"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma_c", "double", context__.to_vec()); + double sigma_c(0); + sigma_c = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma_c); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma_c: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 147; + if (!(context__.contains_r("beta_off"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta_off missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta_off"); + pos__ = 0U; + validate_non_negative_index("beta_off", "j_plus_1", j_plus_1); + validate_non_negative_index("beta_off", "L", L); + context__.validate_dims("parameter initialization", "beta_off", "matrix_d", context__.to_vec(j_plus_1,L)); + Eigen::Matrix beta_off(j_plus_1, L); + size_t beta_off_j_2_max__ = L; + size_t beta_off_j_1_max__ = j_plus_1; + for (size_t j_2__ = 0; j_2__ < beta_off_j_2_max__; ++j_2__) { + for (size_t 
j_1__ = 0; j_1__ < beta_off_j_1_max__; ++j_1__) { + beta_off(j_1__, j_2__) = vals_r__[pos__++]; + } + } + try { + writer__.matrix_unconstrain(beta_off); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta_off: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 148; + if (!(context__.contains_r("lambda"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable lambda missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("lambda"); + pos__ = 0U; + validate_non_negative_index("lambda", "L", L); + context__.validate_dims("parameter initialization", "lambda", "vector_d", context__.to_vec(L)); + Eigen::Matrix lambda(L); + size_t lambda_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + lambda(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_lub_unconstrain(0, 1, lambda); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable lambda: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 149; + if (!(context__.contains_r("eta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable eta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("eta"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "eta", "double", context__.to_vec()); + double eta(0); + eta = vals_r__[pos__++]; + try { + writer__.scalar_lub_unconstrain(0, 1, eta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable eta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 150; + if (!(context__.contains_r("tau"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable tau missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("tau"); + pos__ = 0U; + validate_non_negative_index("tau", "j_plus_1", j_plus_1); + context__.validate_dims("parameter initialization", "tau", "vector_d", context__.to_vec(j_plus_1)); + Eigen::Matrix tau(j_plus_1); + size_t tau_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < tau_j_1_max__; ++j_1__) { + tau(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_lub_unconstrain(0, 1, tau); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable tau: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 152; + if (!(context__.contains_r("y_missing"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable y_missing missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("y_missing"); + pos__ = 0U; + validate_non_negative_index("y_missing", "N_pred", N_pred); + context__.validate_dims("parameter initialization", "y_missing", "row_vector_d", context__.to_vec(N_pred)); + Eigen::Matrix y_missing(N_pred); + size_t y_missing_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_missing_j_1_max__; ++j_1__) { + y_missing(j_1__) = vals_r__[pos__++]; + } + try { + writer__.row_vector_unconstrain(y_missing); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable y_missing: ") + e.what()), current_statement_begin__, prog_reader__()); + } 
+ current_statement_begin__ = 154; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec()); + double sigma(0); + sigma = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 156; + if (!(context__.contains_r("F_diag"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable F_diag missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("F_diag"); + pos__ = 0U; + validate_non_negative_index("F_diag", "L", L); + context__.validate_dims("parameter initialization", "F_diag", "vector_d", context__.to_vec(L)); + Eigen::Matrix F_diag(L); + size_t F_diag_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < F_diag_j_1_max__; ++j_1__) { + F_diag(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_lb_unconstrain(0, F_diag); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable F_diag: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 157; + if (!(context__.contains_r("F_lower"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable F_lower missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("F_lower"); + pos__ = 0U; + validate_non_negative_index("F_lower", "M", M); + context__.validate_dims("parameter initialization", "F_lower", "vector_d", context__.to_vec(M)); + Eigen::Matrix F_lower(M); + size_t F_lower_j_1_max__ = M; + for (size_t j_1__ = 0; j_1__ < F_lower_j_1_max__; ++j_1__) { + F_lower(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(F_lower); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable F_lower: ") + e.what()), current_statement_begin__, prog_reader__()); + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 142; + Eigen::Matrix raw_b; + (void) raw_b; // dummy to suppress unused var warning + if (jacobian__) + raw_b = in__.vector_constrain(T, lp__); + else + raw_b = in__.vector_constrain(T); + current_statement_begin__ = 143; + local_scalar_t__ sigma_b; + (void) sigma_b; // dummy to suppress unused var warning + if (jacobian__) + sigma_b = 
in__.scalar_lb_constrain(0, lp__); + else + sigma_b = in__.scalar_lb_constrain(0); + current_statement_begin__ = 144; + Eigen::Matrix raw_c; + (void) raw_c; // dummy to suppress unused var warning + if (jacobian__) + raw_c = in__.row_vector_constrain(j_plus_1, lp__); + else + raw_c = in__.row_vector_constrain(j_plus_1); + current_statement_begin__ = 145; + local_scalar_t__ sigma_c; + (void) sigma_c; // dummy to suppress unused var warning + if (jacobian__) + sigma_c = in__.scalar_lb_constrain(0, lp__); + else + sigma_c = in__.scalar_lb_constrain(0); + current_statement_begin__ = 147; + Eigen::Matrix beta_off; + (void) beta_off; // dummy to suppress unused var warning + if (jacobian__) + beta_off = in__.matrix_constrain(j_plus_1, L, lp__); + else + beta_off = in__.matrix_constrain(j_plus_1, L); + current_statement_begin__ = 148; + Eigen::Matrix lambda; + (void) lambda; // dummy to suppress unused var warning + if (jacobian__) + lambda = in__.vector_lub_constrain(0, 1, L, lp__); + else + lambda = in__.vector_lub_constrain(0, 1, L); + current_statement_begin__ = 149; + local_scalar_t__ eta; + (void) eta; // dummy to suppress unused var warning + if (jacobian__) + eta = in__.scalar_lub_constrain(0, 1, lp__); + else + eta = in__.scalar_lub_constrain(0, 1); + current_statement_begin__ = 150; + Eigen::Matrix tau; + (void) tau; // dummy to suppress unused var warning + if (jacobian__) + tau = in__.vector_lub_constrain(0, 1, j_plus_1, lp__); + else + tau = in__.vector_lub_constrain(0, 1, j_plus_1); + current_statement_begin__ = 152; + Eigen::Matrix y_missing; + (void) y_missing; // dummy to suppress unused var warning + if (jacobian__) + y_missing = in__.row_vector_constrain(N_pred, lp__); + else + y_missing = in__.row_vector_constrain(N_pred); + current_statement_begin__ = 154; + local_scalar_t__ sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.scalar_lb_constrain(0, lp__); + else + sigma = in__.scalar_lb_constrain(0); + current_statement_begin__ = 156; + Eigen::Matrix F_diag; + (void) F_diag; // dummy to suppress unused var warning + if (jacobian__) + F_diag = in__.vector_lb_constrain(0, L, lp__); + else + F_diag = in__.vector_lb_constrain(0, L); + current_statement_begin__ = 157; + Eigen::Matrix F_lower; + (void) F_lower; // dummy to suppress unused var warning + if (jacobian__) + F_lower = in__.vector_constrain(M, lp__); + else + F_lower = in__.vector_constrain(M); + // transformed parameters + current_statement_begin__ = 162; + validate_non_negative_index("beta", "L", L); + validate_non_negative_index("beta", "j_plus_1", j_plus_1); + Eigen::Matrix beta(L, j_plus_1); + stan::math::initialize(beta, DUMMY_VAR__); + stan::math::fill(beta, DUMMY_VAR__); + stan::math::assign(beta,make_beta(j_plus_1, beta_off, lambda, eta, tau, pstream__)); + current_statement_begin__ = 167; + validate_non_negative_index("b", "T", T); + Eigen::Matrix b(T); + stan::math::initialize(b, DUMMY_VAR__); + stan::math::fill(b, DUMMY_VAR__); + stan::math::assign(b,multiply(raw_b, sigma_b)); + current_statement_begin__ = 168; + validate_non_negative_index("c", "j_plus_1", j_plus_1); + Eigen::Matrix c(j_plus_1); + stan::math::initialize(c, DUMMY_VAR__); + stan::math::fill(c, DUMMY_VAR__); + stan::math::assign(c,multiply(raw_c, sigma_c)); + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 162; + size_t beta_j_1_max__ = L; + size_t beta_j_2_max__ = 
j_plus_1; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t j_2__ = 0; j_2__ < beta_j_2_max__; ++j_2__) { + if (stan::math::is_uninitialized(beta(j_1__, j_2__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: beta" << "(" << j_1__ << ", " << j_2__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable beta: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + } + current_statement_begin__ = 167; + size_t b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < b_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(b(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: b" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable b: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 168; + size_t c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < c_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(c(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: c" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable c: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + // model body + current_statement_begin__ = 173; + lp_accum__.add(std_normal_log(to_vector(beta_off))); + current_statement_begin__ = 174; + lp_accum__.add(std_normal_log(F_diag)); + current_statement_begin__ = 175; + lp_accum__.add(normal_log(F_lower, 0, 2)); + current_statement_begin__ = 176; + lp_accum__.add(std_normal_log(raw_b)); + current_statement_begin__ = 177; + lp_accum__.add(std_normal_log(sigma_b)); + current_statement_begin__ = 178; + lp_accum__.add(std_normal_log(raw_c)); + current_statement_begin__ = 179; + lp_accum__.add(std_normal_log(sigma_c)); + current_statement_begin__ = 180; + lp_accum__.add(std_normal_log(sigma)); + { + current_statement_begin__ = 182; + validate_non_negative_index("F", "T", T); + validate_non_negative_index("F", "L", L); + Eigen::Matrix F(T, L); + stan::math::initialize(F, DUMMY_VAR__); + stan::math::fill(F, DUMMY_VAR__); + stan::math::assign(F,make_F(T, F_diag, F_lower, pstream__)); + current_statement_begin__ = 183; + validate_non_negative_index("Y_target", "T", T); + validate_non_negative_index("Y_target", "1", 1); + std::vector > Y_target(1, Eigen::Matrix(T)); + stan::math::initialize(Y_target, DUMMY_VAR__); + stan::math::fill(Y_target, DUMMY_VAR__); + current_statement_begin__ = 184; + validate_non_negative_index("Y_temp", "T", T); + validate_non_negative_index("Y_temp", "j_plus_1", j_plus_1); + std::vector > Y_temp(j_plus_1, Eigen::Matrix(T)); + stan::math::initialize(Y_temp, DUMMY_VAR__); + stan::math::fill(Y_temp, DUMMY_VAR__); + current_statement_begin__ = 185; + stan::model::assign(Y_target, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + append_col(y_std, y_missing), + "assigning variable Y_target"); + current_statement_begin__ = 188; + stan::math::assign(Y_temp, append_array(Y_target, y_donors)); + current_statement_begin__ = 190; + for (int j = 1; j <= j_plus_1; ++j) { + current_statement_begin__ = 191; + lp_accum__.add(normal_id_glm_lpdf(transpose(get_base1(Y_temp, j, "Y_temp", 1)), F, add(b, get_base1(c, j, "c", 1)), stan::model::rvalue(beta, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), 
"beta"), sigma)); + } + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("raw_b"); + names__.push_back("sigma_b"); + names__.push_back("raw_c"); + names__.push_back("sigma_c"); + names__.push_back("beta_off"); + names__.push_back("lambda"); + names__.push_back("eta"); + names__.push_back("tau"); + names__.push_back("y_missing"); + names__.push_back("sigma"); + names__.push_back("F_diag"); + names__.push_back("F_lower"); + names__.push_back("beta"); + names__.push_back("b"); + names__.push_back("c"); + names__.push_back("synth_out"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(T); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(j_plus_1); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(j_plus_1); + dims__.push_back(L); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(L); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(j_plus_1); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(L); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(M); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(L); + dims__.push_back(j_plus_1); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(T); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(j_plus_1); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(T); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_factor_model_with_covariates_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + Eigen::Matrix raw_b = in__.vector_constrain(T); + size_t raw_b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < raw_b_j_1_max__; ++j_1__) { + vars__.push_back(raw_b(j_1__)); + } + double sigma_b = in__.scalar_lb_constrain(0); + vars__.push_back(sigma_b); + Eigen::Matrix raw_c = in__.row_vector_constrain(j_plus_1); + size_t raw_c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < raw_c_j_1_max__; ++j_1__) { + vars__.push_back(raw_c(j_1__)); + } + double sigma_c = in__.scalar_lb_constrain(0); + vars__.push_back(sigma_c); + Eigen::Matrix 
beta_off = in__.matrix_constrain(j_plus_1, L); + size_t beta_off_j_2_max__ = L; + size_t beta_off_j_1_max__ = j_plus_1; + for (size_t j_2__ = 0; j_2__ < beta_off_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_off_j_1_max__; ++j_1__) { + vars__.push_back(beta_off(j_1__, j_2__)); + } + } + Eigen::Matrix lambda = in__.vector_lub_constrain(0, 1, L); + size_t lambda_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + vars__.push_back(lambda(j_1__)); + } + double eta = in__.scalar_lub_constrain(0, 1); + vars__.push_back(eta); + Eigen::Matrix tau = in__.vector_lub_constrain(0, 1, j_plus_1); + size_t tau_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < tau_j_1_max__; ++j_1__) { + vars__.push_back(tau(j_1__)); + } + Eigen::Matrix y_missing = in__.row_vector_constrain(N_pred); + size_t y_missing_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_missing_j_1_max__; ++j_1__) { + vars__.push_back(y_missing(j_1__)); + } + double sigma = in__.scalar_lb_constrain(0); + vars__.push_back(sigma); + Eigen::Matrix F_diag = in__.vector_lb_constrain(0, L); + size_t F_diag_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < F_diag_j_1_max__; ++j_1__) { + vars__.push_back(F_diag(j_1__)); + } + Eigen::Matrix F_lower = in__.vector_constrain(M); + size_t F_lower_j_1_max__ = M; + for (size_t j_1__ = 0; j_1__ < F_lower_j_1_max__; ++j_1__) { + vars__.push_back(F_lower(j_1__)); + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + // declare and define transformed parameters + current_statement_begin__ = 162; + validate_non_negative_index("beta", "L", L); + validate_non_negative_index("beta", "j_plus_1", j_plus_1); + Eigen::Matrix beta(L, j_plus_1); + stan::math::initialize(beta, DUMMY_VAR__); + stan::math::fill(beta, DUMMY_VAR__); + stan::math::assign(beta,make_beta(j_plus_1, beta_off, lambda, eta, tau, pstream__)); + current_statement_begin__ = 167; + validate_non_negative_index("b", "T", T); + Eigen::Matrix b(T); + stan::math::initialize(b, DUMMY_VAR__); + stan::math::fill(b, DUMMY_VAR__); + stan::math::assign(b,multiply(raw_b, sigma_b)); + current_statement_begin__ = 168; + validate_non_negative_index("c", "j_plus_1", j_plus_1); + Eigen::Matrix c(j_plus_1); + stan::math::initialize(c, DUMMY_VAR__); + stan::math::fill(c, DUMMY_VAR__); + stan::math::assign(c,multiply(raw_c, sigma_c)); + if (!include_gqs__ && !include_tparams__) return; + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + // write transformed parameters + if (include_tparams__) { + size_t beta_j_2_max__ = j_plus_1; + size_t beta_j_1_max__ = L; + for (size_t j_2__ = 0; j_2__ < beta_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + vars__.push_back(beta(j_1__, j_2__)); + } + } + size_t b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < b_j_1_max__; ++j_1__) { + vars__.push_back(b(j_1__)); + } + size_t c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < c_j_1_max__; ++j_1__) { + vars__.push_back(c(j_1__)); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 199; + validate_non_negative_index("synth_out", "T", T); + Eigen::Matrix synth_out(T); + 
stan::math::initialize(synth_out, DUMMY_VAR__); + stan::math::fill(synth_out, DUMMY_VAR__); + // generated quantities statements + { + current_statement_begin__ = 201; + validate_non_negative_index("F_", "T", T); + validate_non_negative_index("F_", "L", L); + Eigen::Matrix F_(T, L); + stan::math::initialize(F_, DUMMY_VAR__); + stan::math::fill(F_, DUMMY_VAR__); + stan::math::assign(F_,make_F(T, F_diag, F_lower, pstream__)); + current_statement_begin__ = 202; + validate_non_negative_index("Synth_", "T", T); + validate_non_negative_index("Synth_", "j_plus_1", j_plus_1); + Eigen::Matrix Synth_(T, j_plus_1); + stan::math::initialize(Synth_, DUMMY_VAR__); + stan::math::fill(Synth_, DUMMY_VAR__); + stan::math::assign(Synth_,add(add(multiply(F_, beta), multiply(b, j_ones)), multiply(t_ones, c))); + current_statement_begin__ = 206; + for (int t = 1; t <= T; ++t) { + current_statement_begin__ = 207; + stan::model::assign(synth_out, + stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), + ((normal_rng(get_base1(Synth_, t, 1, "Synth_", 1), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable synth_out"); + } + } + // validate, write generated quantities + current_statement_begin__ = 199; + size_t synth_out_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < synth_out_j_1_max__; ++j_1__) { + vars__.push_back(synth_out(j_1__)); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_factor_model_with_covariates"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t raw_b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < raw_b_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "raw_b" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_b"; + param_names__.push_back(param_name_stream__.str()); + size_t raw_c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < raw_c_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "raw_c" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_c"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_off_j_2_max__ = L; + size_t beta_off_j_1_max__ = j_plus_1; + for (size_t j_2__ = 0; j_2__ < beta_off_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_off_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta_off" << '.' << j_1__ + 1 << '.' 
<< j_2__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t lambda_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "eta"; + param_names__.push_back(param_name_stream__.str()); + size_t tau_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < tau_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_missing_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_missing_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_missing" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t F_diag_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < F_diag_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "F_diag" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t F_lower_j_1_max__ = M; + for (size_t j_1__ = 0; j_1__ < F_lower_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "F_lower" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t beta_j_2_max__ = j_plus_1; + size_t beta_j_1_max__ = L; + for (size_t j_2__ = 0; j_2__ < beta_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1 << '.' << j_2__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < b_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "b" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < c_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "c" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t synth_out_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < synth_out_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "synth_out" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t raw_b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < raw_b_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "raw_b" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_b"; + param_names__.push_back(param_name_stream__.str()); + size_t raw_c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < raw_c_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "raw_c" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_c"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_off_j_2_max__ = L; + size_t beta_off_j_1_max__ = j_plus_1; + for (size_t j_2__ = 0; j_2__ < beta_off_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_off_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta_off" << '.' << j_1__ + 1 << '.' << j_2__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t lambda_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "eta"; + param_names__.push_back(param_name_stream__.str()); + size_t tau_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < tau_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_missing_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_missing_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_missing" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t F_diag_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < F_diag_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "F_diag" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t F_lower_j_1_max__ = M; + for (size_t j_1__ = 0; j_1__ < F_lower_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "F_lower" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t beta_j_2_max__ = j_plus_1; + size_t beta_j_1_max__ = L; + for (size_t j_2__ = 0; j_2__ < beta_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1 << '.' << j_2__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < b_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "b" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < c_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "c" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t synth_out_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < synth_out_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "synth_out" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } +}; // model +} // namespace +typedef model_factor_model_with_covariates_namespace::model_factor_model_with_covariates stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_factor_model_without_covariates.cc b/src/stanExports_factor_model_without_covariates.cc new file mode 100644 index 0000000..1e50f6d --- /dev/null +++ b/src/stanExports_factor_model_without_covariates.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_factor_model_without_covariates.h" + +RCPP_MODULE(stan_fit4factor_model_without_covariates_mod) { + + + class_ >("model_factor_model_without_covariates") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_factor_model_without_covariates.h b/src/stanExports_factor_model_without_covariates.h new file mode 100644 index 0000000..c305f9f --- /dev/null +++ b/src/stanExports_factor_model_without_covariates.h @@ -0,0 +1,1321 @@ +// Generated by rstantools. Do not edit by hand. 
+ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_factor_model_without_covariates_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_factor_model_without_covariates"); + reader.add_event(24, 24, "include", "factor_functions.stan"); + reader.add_event(24, 0, "start", "factor_functions.stan"); + reader.add_event(96, 72, "end", "factor_functions.stan"); + reader.add_event(96, 25, "restart", "model_factor_model_without_covariates"); + reader.add_event(207, 134, "end", "model_factor_model_without_covariates"); + return reader; +} +template +Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> +make_F(const int& T, + const Eigen::Matrix& diagonal_loadings, + const Eigen::Matrix& lower_tri_loadings, std::ostream* pstream__) { + typedef typename boost::math::tools::promote_args::type local_scalar_t__; + typedef local_scalar_t__ fun_return_scalar_t__; + const static bool propto__ = true; + (void) propto__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + int current_statement_begin__ = -1; + try { + { + current_statement_begin__ = 51; + int L(0); + (void) L; // dummy to suppress unused var warning + stan::math::fill(L, std::numeric_limits::min()); + stan::math::assign(L,num_elements(diagonal_loadings)); + current_statement_begin__ = 52; + int M(0); + (void) M; // dummy to suppress unused var warning + stan::math::fill(M, std::numeric_limits::min()); + stan::math::assign(M,num_elements(lower_tri_loadings)); + current_statement_begin__ = 53; + validate_non_negative_index("F", "T", T); + validate_non_negative_index("F", "L", L); + Eigen::Matrix F(T, L); + stan::math::initialize(F, DUMMY_VAR__); + stan::math::fill(F, DUMMY_VAR__); + current_statement_begin__ = 55; + int idx(0); + (void) idx; // dummy to suppress unused var warning + stan::math::fill(idx, std::numeric_limits::min()); + stan::math::assign(idx,0); + current_statement_begin__ = 57; + for (int j = 1; j <= L; ++j) { + current_statement_begin__ = 58; + stan::model::assign(F, + stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + get_base1(diagonal_loadings, j, "diagonal_loadings", 1), + "assigning variable F"); + current_statement_begin__ = 59; + for (int i = (j + 1); i <= T; ++i) { + current_statement_begin__ = 60; + stan::math::assign(idx, (idx + 1)); + current_statement_begin__ = 61; + stan::model::assign(F, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + get_base1(lower_tri_loadings, idx, "lower_tri_loadings", 1), + "assigning variable F"); + } + } + current_statement_begin__ = 64; + for (int j = 1; j <= (L - 1); ++j) { + current_statement_begin__ = 65; + for (int i = (j + 1); i <= L; ++i) { + current_statement_begin__ = 65; + stan::model::assign(F, + stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list())), + 0, + "assigning variable F"); + } + } + current_statement_begin__ = 68; + return 
stan::math::promote_scalar(F); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } +} +struct make_F_functor__ { + template + Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> + operator()(const int& T, + const Eigen::Matrix& diagonal_loadings, + const Eigen::Matrix& lower_tri_loadings, std::ostream* pstream__) const { + return make_F(T, diagonal_loadings, lower_tri_loadings, pstream__); + } +}; +template +Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> +make_beta(const int& J, + const Eigen::Matrix& off, + const Eigen::Matrix& lambda, + const T3__& eta, + const Eigen::Matrix& tau, std::ostream* pstream__) { + typedef typename boost::math::tools::promote_args::type local_scalar_t__; + typedef local_scalar_t__ fun_return_scalar_t__; + const static bool propto__ = true; + (void) propto__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + int current_statement_begin__ = -1; + try { + { + current_statement_begin__ = 84; + int L(0); + (void) L; // dummy to suppress unused var warning + stan::math::fill(L, std::numeric_limits::min()); + stan::math::assign(L,cols(off)); + current_statement_begin__ = 85; + validate_non_negative_index("cache", "L", L); + Eigen::Matrix cache(L); + stan::math::initialize(cache, DUMMY_VAR__); + stan::math::fill(cache, DUMMY_VAR__); + stan::math::assign(cache,multiply(stan::math::tan(multiply((0.5 * stan::math::pi()), lambda)), stan::math::tan(((0.5 * stan::math::pi()) * eta)))); + current_statement_begin__ = 88; + validate_non_negative_index("tau_", "J", J); + Eigen::Matrix tau_(J); + stan::math::initialize(tau_, DUMMY_VAR__); + stan::math::fill(tau_, DUMMY_VAR__); + stan::math::assign(tau_,stan::math::tan(multiply((0.5 * stan::math::pi()), tau))); + current_statement_begin__ = 89; + validate_non_negative_index("out", "J", J); + validate_non_negative_index("out", "L", L); + Eigen::Matrix out(J, L); + stan::math::initialize(out, DUMMY_VAR__); + stan::math::fill(out, DUMMY_VAR__); + current_statement_begin__ = 91; + for (int j = 1; j <= J; ++j) { + current_statement_begin__ = 92; + stan::model::assign(out, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + multiply(get_base1(off, j, "off", 1), get_base1(tau_, j, "tau_", 1)), + "assigning variable out"); + } + current_statement_begin__ = 94; + return stan::math::promote_scalar(diag_pre_multiply(cache, transpose(out))); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } +} +struct make_beta_functor__ { + template + Eigen::Matrix::type, Eigen::Dynamic, Eigen::Dynamic> + operator()(const int& J, + const Eigen::Matrix& off, + const Eigen::Matrix& lambda, + const T3__& eta, + const Eigen::Matrix& tau, std::ostream* pstream__) const { + return make_beta(J, off, lambda, eta, tau, pstream__); + } +}; +#include +class model_factor_model_without_covariates + : public stan::model::model_base_crtp { +private: + int L; + int N; + row_vector_d y_treated_pre; + int J; + std::vector y_donors_pre; + int N_pred; + std::vector y_donors_post; + int T; + int j_plus_1; + int M; + row_vector_d j_ones; + vector_d t_ones; + 
std::vector y_donors; + std::vector y_donors_pre_std; + std::vector y_donors_post_std; + vector_d mean_y_donors_pre; + vector_d sd_y_donors_pre; + double mean_y; + double sd_y; + row_vector_d y_std; +public: + model_factor_model_without_covariates(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_factor_model_without_covariates(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_factor_model_without_covariates_namespace::model_factor_model_without_covariates"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 99; + context__.validate_dims("data initialization", "L", "int", context__.to_vec()); + L = int(0); + vals_i__ = context__.vals_i("L"); + pos__ = 0; + L = vals_i__[pos__++]; + check_greater_or_equal(function__, "L", L, 2); + current_statement_begin__ = 100; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 101; + validate_non_negative_index("y_treated_pre", "N", N); + context__.validate_dims("data initialization", "y_treated_pre", "row_vector_d", context__.to_vec(N)); + y_treated_pre = Eigen::Matrix(N); + vals_r__ = context__.vals_r("y_treated_pre"); + pos__ = 0; + size_t y_treated_pre_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_treated_pre_j_1_max__; ++j_1__) { + y_treated_pre(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 103; + context__.validate_dims("data initialization", "J", "int", context__.to_vec()); + J = int(0); + vals_i__ = context__.vals_i("J"); + pos__ = 0; + J = vals_i__[pos__++]; + check_greater_or_equal(function__, "J", J, 0); + current_statement_begin__ = 104; + validate_non_negative_index("y_donors_pre", "N", N); + validate_non_negative_index("y_donors_pre", "J", J); + context__.validate_dims("data initialization", "y_donors_pre", "row_vector_d", context__.to_vec(J,N)); + y_donors_pre = std::vector >(J, Eigen::Matrix(N)); + vals_r__ = context__.vals_r("y_donors_pre"); + pos__ = 0; + size_t y_donors_pre_j_1_max__ = N; + size_t y_donors_pre_k_0_max__ = J; + for (size_t j_1__ = 0; j_1__ < y_donors_pre_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_donors_pre_k_0_max__; ++k_0__) { + y_donors_pre[k_0__](j_1__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 106; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = context__.vals_i("N_pred"); + pos__ = 0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 107; 
+ validate_non_negative_index("y_donors_post", "N_pred", N_pred); + validate_non_negative_index("y_donors_post", "J", J); + context__.validate_dims("data initialization", "y_donors_post", "row_vector_d", context__.to_vec(J,N_pred)); + y_donors_post = std::vector >(J, Eigen::Matrix(N_pred)); + vals_r__ = context__.vals_r("y_donors_post"); + pos__ = 0; + size_t y_donors_post_j_1_max__ = N_pred; + size_t y_donors_post_k_0_max__ = J; + for (size_t j_1__ = 0; j_1__ < y_donors_post_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_donors_post_k_0_max__; ++k_0__) { + y_donors_post[k_0__](j_1__) = vals_r__[pos__++]; + } + } + // initialize transformed data variables + current_statement_begin__ = 111; + T = int(0); + stan::math::fill(T, std::numeric_limits::min()); + stan::math::assign(T,(N + N_pred)); + current_statement_begin__ = 112; + j_plus_1 = int(0); + stan::math::fill(j_plus_1, std::numeric_limits::min()); + stan::math::assign(j_plus_1,(J + 1)); + current_statement_begin__ = 113; + M = int(0); + stan::math::fill(M, std::numeric_limits::min()); + stan::math::assign(M,((L * (T - L)) + divide((L * (L - 1)), 2))); + current_statement_begin__ = 114; + validate_non_negative_index("j_ones", "j_plus_1", j_plus_1); + j_ones = Eigen::Matrix(j_plus_1); + stan::math::fill(j_ones, DUMMY_VAR__); + stan::math::assign(j_ones,rep_row_vector(1, j_plus_1)); + current_statement_begin__ = 115; + validate_non_negative_index("t_ones", "T", T); + t_ones = Eigen::Matrix(T); + stan::math::fill(t_ones, DUMMY_VAR__); + stan::math::assign(t_ones,rep_vector(1.0, T)); + current_statement_begin__ = 117; + validate_non_negative_index("y_donors", "T", T); + validate_non_negative_index("y_donors", "J", J); + y_donors = std::vector >(J, Eigen::Matrix(T)); + stan::math::fill(y_donors, DUMMY_VAR__); + current_statement_begin__ = 119; + validate_non_negative_index("y_donors_pre_std", "N", N); + validate_non_negative_index("y_donors_pre_std", "J", J); + y_donors_pre_std = std::vector >(J, Eigen::Matrix(N)); + stan::math::fill(y_donors_pre_std, DUMMY_VAR__); + current_statement_begin__ = 120; + validate_non_negative_index("y_donors_post_std", "N_pred", N_pred); + validate_non_negative_index("y_donors_post_std", "J", J); + y_donors_post_std = std::vector >(J, Eigen::Matrix(N_pred)); + stan::math::fill(y_donors_post_std, DUMMY_VAR__); + current_statement_begin__ = 121; + validate_non_negative_index("mean_y_donors_pre", "J", J); + mean_y_donors_pre = Eigen::Matrix(J); + stan::math::fill(mean_y_donors_pre, DUMMY_VAR__); + current_statement_begin__ = 122; + validate_non_negative_index("sd_y_donors_pre", "J", J); + sd_y_donors_pre = Eigen::Matrix(J); + stan::math::fill(sd_y_donors_pre, DUMMY_VAR__); + current_statement_begin__ = 123; + mean_y = double(0); + stan::math::fill(mean_y, DUMMY_VAR__); + stan::math::assign(mean_y,mean(y_treated_pre)); + current_statement_begin__ = 124; + sd_y = double(0); + stan::math::fill(sd_y, DUMMY_VAR__); + stan::math::assign(sd_y,sd(y_treated_pre)); + current_statement_begin__ = 125; + validate_non_negative_index("y_std", "N", N); + y_std = Eigen::Matrix(N); + stan::math::fill(y_std, DUMMY_VAR__); + stan::math::assign(y_std,divide(subtract(y_treated_pre, mean_y), sd_y)); + // execute transformed data statements + current_statement_begin__ = 127; + for (int j = 1; j <= J; ++j) { + current_statement_begin__ = 128; + stan::model::assign(mean_y_donors_pre, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + mean(get_base1(y_donors_pre, j, "y_donors_pre", 1)), + 
"assigning variable mean_y_donors_pre"); + current_statement_begin__ = 129; + stan::model::assign(sd_y_donors_pre, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + sd(get_base1(y_donors_pre, j, "y_donors_pre", 1)), + "assigning variable sd_y_donors_pre"); + current_statement_begin__ = 130; + stan::model::assign(y_donors_pre_std, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + divide(subtract(get_base1(y_donors_pre, j, "y_donors_pre", 1), get_base1(mean_y_donors_pre, j, "mean_y_donors_pre", 1)), get_base1(sd_y_donors_pre, j, "sd_y_donors_pre", 1)), + "assigning variable y_donors_pre_std"); + current_statement_begin__ = 132; + stan::model::assign(y_donors_post_std, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + divide(subtract(get_base1(y_donors_post, j, "y_donors_post", 1), get_base1(mean_y_donors_pre, j, "mean_y_donors_pre", 1)), get_base1(sd_y_donors_pre, j, "sd_y_donors_pre", 1)), + "assigning variable y_donors_post_std"); + current_statement_begin__ = 134; + stan::model::assign(y_donors, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + append_col(get_base1(y_donors_pre_std, j, "y_donors_pre_std", 1), get_base1(y_donors_post_std, j, "y_donors_post_std", 1)), + "assigning variable y_donors"); + } + // validate transformed data + current_statement_begin__ = 113; + check_greater_or_equal(function__, "M", M, 1); + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 139; + validate_non_negative_index("raw_b", "T", T); + num_params_r__ += T; + current_statement_begin__ = 140; + num_params_r__ += 1; + current_statement_begin__ = 141; + validate_non_negative_index("raw_c", "j_plus_1", j_plus_1); + num_params_r__ += j_plus_1; + current_statement_begin__ = 142; + num_params_r__ += 1; + current_statement_begin__ = 144; + validate_non_negative_index("beta_off", "j_plus_1", j_plus_1); + validate_non_negative_index("beta_off", "L", L); + num_params_r__ += (j_plus_1 * L); + current_statement_begin__ = 145; + validate_non_negative_index("lambda", "L", L); + num_params_r__ += L; + current_statement_begin__ = 146; + num_params_r__ += 1; + current_statement_begin__ = 147; + validate_non_negative_index("tau", "j_plus_1", j_plus_1); + num_params_r__ += j_plus_1; + current_statement_begin__ = 149; + validate_non_negative_index("y_missing", "N_pred", N_pred); + num_params_r__ += N_pred; + current_statement_begin__ = 151; + num_params_r__ += 1; + current_statement_begin__ = 153; + validate_non_negative_index("F_diag", "L", L); + num_params_r__ += L; + current_statement_begin__ = 154; + validate_non_negative_index("F_lower", "M", M); + num_params_r__ += M; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_factor_model_without_covariates() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 139; + if (!(context__.contains_r("raw_b"))) + 
stan::lang::rethrow_located(std::runtime_error(std::string("Variable raw_b missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("raw_b"); + pos__ = 0U; + validate_non_negative_index("raw_b", "T", T); + context__.validate_dims("parameter initialization", "raw_b", "vector_d", context__.to_vec(T)); + Eigen::Matrix raw_b(T); + size_t raw_b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < raw_b_j_1_max__; ++j_1__) { + raw_b(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(raw_b); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable raw_b: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 140; + if (!(context__.contains_r("sigma_b"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma_b missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma_b"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma_b", "double", context__.to_vec()); + double sigma_b(0); + sigma_b = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma_b); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma_b: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 141; + if (!(context__.contains_r("raw_c"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable raw_c missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("raw_c"); + pos__ = 0U; + validate_non_negative_index("raw_c", "j_plus_1", j_plus_1); + context__.validate_dims("parameter initialization", "raw_c", "row_vector_d", context__.to_vec(j_plus_1)); + Eigen::Matrix raw_c(j_plus_1); + size_t raw_c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < raw_c_j_1_max__; ++j_1__) { + raw_c(j_1__) = vals_r__[pos__++]; + } + try { + writer__.row_vector_unconstrain(raw_c); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable raw_c: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 142; + if (!(context__.contains_r("sigma_c"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma_c missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma_c"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma_c", "double", context__.to_vec()); + double sigma_c(0); + sigma_c = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma_c); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma_c: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 144; + if (!(context__.contains_r("beta_off"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta_off missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta_off"); + pos__ = 0U; + validate_non_negative_index("beta_off", "j_plus_1", j_plus_1); + validate_non_negative_index("beta_off", "L", L); + context__.validate_dims("parameter initialization", "beta_off", "matrix_d", context__.to_vec(j_plus_1,L)); + Eigen::Matrix beta_off(j_plus_1, L); + size_t beta_off_j_2_max__ = L; + size_t beta_off_j_1_max__ = j_plus_1; + for 
(size_t j_2__ = 0; j_2__ < beta_off_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_off_j_1_max__; ++j_1__) { + beta_off(j_1__, j_2__) = vals_r__[pos__++]; + } + } + try { + writer__.matrix_unconstrain(beta_off); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta_off: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 145; + if (!(context__.contains_r("lambda"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable lambda missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("lambda"); + pos__ = 0U; + validate_non_negative_index("lambda", "L", L); + context__.validate_dims("parameter initialization", "lambda", "vector_d", context__.to_vec(L)); + Eigen::Matrix lambda(L); + size_t lambda_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + lambda(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_lub_unconstrain(0, 1, lambda); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable lambda: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 146; + if (!(context__.contains_r("eta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable eta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("eta"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "eta", "double", context__.to_vec()); + double eta(0); + eta = vals_r__[pos__++]; + try { + writer__.scalar_lub_unconstrain(0, 1, eta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable eta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 147; + if (!(context__.contains_r("tau"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable tau missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("tau"); + pos__ = 0U; + validate_non_negative_index("tau", "j_plus_1", j_plus_1); + context__.validate_dims("parameter initialization", "tau", "vector_d", context__.to_vec(j_plus_1)); + Eigen::Matrix tau(j_plus_1); + size_t tau_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < tau_j_1_max__; ++j_1__) { + tau(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_lub_unconstrain(0, 1, tau); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable tau: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 149; + if (!(context__.contains_r("y_missing"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable y_missing missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("y_missing"); + pos__ = 0U; + validate_non_negative_index("y_missing", "N_pred", N_pred); + context__.validate_dims("parameter initialization", "y_missing", "row_vector_d", context__.to_vec(N_pred)); + Eigen::Matrix y_missing(N_pred); + size_t y_missing_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_missing_j_1_max__; ++j_1__) { + y_missing(j_1__) = vals_r__[pos__++]; + } + try { + writer__.row_vector_unconstrain(y_missing); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable 
y_missing: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 151; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec()); + double sigma(0); + sigma = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 153; + if (!(context__.contains_r("F_diag"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable F_diag missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("F_diag"); + pos__ = 0U; + validate_non_negative_index("F_diag", "L", L); + context__.validate_dims("parameter initialization", "F_diag", "vector_d", context__.to_vec(L)); + Eigen::Matrix F_diag(L); + size_t F_diag_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < F_diag_j_1_max__; ++j_1__) { + F_diag(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_lb_unconstrain(0, F_diag); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable F_diag: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 154; + if (!(context__.contains_r("F_lower"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable F_lower missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("F_lower"); + pos__ = 0U; + validate_non_negative_index("F_lower", "M", M); + context__.validate_dims("parameter initialization", "F_lower", "vector_d", context__.to_vec(M)); + Eigen::Matrix F_lower(M); + size_t F_lower_j_1_max__ = M; + for (size_t j_1__ = 0; j_1__ < F_lower_j_1_max__; ++j_1__) { + F_lower(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(F_lower); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable F_lower: ") + e.what()), current_statement_begin__, prog_reader__()); + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 139; + Eigen::Matrix raw_b; + (void) raw_b; // dummy to suppress unused var warning + if (jacobian__) + raw_b = in__.vector_constrain(T, lp__); + else + raw_b = in__.vector_constrain(T); + current_statement_begin__ = 140; + local_scalar_t__ sigma_b; + (void) sigma_b; // dummy to 
suppress unused var warning + if (jacobian__) + sigma_b = in__.scalar_lb_constrain(0, lp__); + else + sigma_b = in__.scalar_lb_constrain(0); + current_statement_begin__ = 141; + Eigen::Matrix raw_c; + (void) raw_c; // dummy to suppress unused var warning + if (jacobian__) + raw_c = in__.row_vector_constrain(j_plus_1, lp__); + else + raw_c = in__.row_vector_constrain(j_plus_1); + current_statement_begin__ = 142; + local_scalar_t__ sigma_c; + (void) sigma_c; // dummy to suppress unused var warning + if (jacobian__) + sigma_c = in__.scalar_lb_constrain(0, lp__); + else + sigma_c = in__.scalar_lb_constrain(0); + current_statement_begin__ = 144; + Eigen::Matrix beta_off; + (void) beta_off; // dummy to suppress unused var warning + if (jacobian__) + beta_off = in__.matrix_constrain(j_plus_1, L, lp__); + else + beta_off = in__.matrix_constrain(j_plus_1, L); + current_statement_begin__ = 145; + Eigen::Matrix lambda; + (void) lambda; // dummy to suppress unused var warning + if (jacobian__) + lambda = in__.vector_lub_constrain(0, 1, L, lp__); + else + lambda = in__.vector_lub_constrain(0, 1, L); + current_statement_begin__ = 146; + local_scalar_t__ eta; + (void) eta; // dummy to suppress unused var warning + if (jacobian__) + eta = in__.scalar_lub_constrain(0, 1, lp__); + else + eta = in__.scalar_lub_constrain(0, 1); + current_statement_begin__ = 147; + Eigen::Matrix tau; + (void) tau; // dummy to suppress unused var warning + if (jacobian__) + tau = in__.vector_lub_constrain(0, 1, j_plus_1, lp__); + else + tau = in__.vector_lub_constrain(0, 1, j_plus_1); + current_statement_begin__ = 149; + Eigen::Matrix y_missing; + (void) y_missing; // dummy to suppress unused var warning + if (jacobian__) + y_missing = in__.row_vector_constrain(N_pred, lp__); + else + y_missing = in__.row_vector_constrain(N_pred); + current_statement_begin__ = 151; + local_scalar_t__ sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.scalar_lb_constrain(0, lp__); + else + sigma = in__.scalar_lb_constrain(0); + current_statement_begin__ = 153; + Eigen::Matrix F_diag; + (void) F_diag; // dummy to suppress unused var warning + if (jacobian__) + F_diag = in__.vector_lb_constrain(0, L, lp__); + else + F_diag = in__.vector_lb_constrain(0, L); + current_statement_begin__ = 154; + Eigen::Matrix F_lower; + (void) F_lower; // dummy to suppress unused var warning + if (jacobian__) + F_lower = in__.vector_constrain(M, lp__); + else + F_lower = in__.vector_constrain(M); + // transformed parameters + current_statement_begin__ = 158; + validate_non_negative_index("beta", "L", L); + validate_non_negative_index("beta", "j_plus_1", j_plus_1); + Eigen::Matrix beta(L, j_plus_1); + stan::math::initialize(beta, DUMMY_VAR__); + stan::math::fill(beta, DUMMY_VAR__); + stan::math::assign(beta,make_beta(j_plus_1, beta_off, lambda, eta, tau, pstream__)); + current_statement_begin__ = 163; + validate_non_negative_index("b", "T", T); + Eigen::Matrix b(T); + stan::math::initialize(b, DUMMY_VAR__); + stan::math::fill(b, DUMMY_VAR__); + stan::math::assign(b,multiply(raw_b, sigma_b)); + current_statement_begin__ = 164; + validate_non_negative_index("c", "j_plus_1", j_plus_1); + Eigen::Matrix c(j_plus_1); + stan::math::initialize(c, DUMMY_VAR__); + stan::math::fill(c, DUMMY_VAR__); + stan::math::assign(c,multiply(raw_c, sigma_c)); + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 158; 
+ size_t beta_j_1_max__ = L; + size_t beta_j_2_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t j_2__ = 0; j_2__ < beta_j_2_max__; ++j_2__) { + if (stan::math::is_uninitialized(beta(j_1__, j_2__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: beta" << "(" << j_1__ << ", " << j_2__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable beta: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + } + current_statement_begin__ = 163; + size_t b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < b_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(b(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: b" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable b: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 164; + size_t c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < c_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(c(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: c" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable c: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + // model body + current_statement_begin__ = 168; + lp_accum__.add(std_normal_log(to_vector(beta_off))); + current_statement_begin__ = 169; + lp_accum__.add(std_normal_log(F_diag)); + current_statement_begin__ = 170; + lp_accum__.add(normal_log(F_lower, 0, 2)); + current_statement_begin__ = 171; + lp_accum__.add(std_normal_log(raw_b)); + current_statement_begin__ = 172; + lp_accum__.add(std_normal_log(sigma_b)); + current_statement_begin__ = 173; + lp_accum__.add(std_normal_log(raw_c)); + current_statement_begin__ = 174; + lp_accum__.add(std_normal_log(sigma_c)); + current_statement_begin__ = 175; + lp_accum__.add(std_normal_log(sigma)); + { + current_statement_begin__ = 177; + validate_non_negative_index("F", "T", T); + validate_non_negative_index("F", "L", L); + Eigen::Matrix F(T, L); + stan::math::initialize(F, DUMMY_VAR__); + stan::math::fill(F, DUMMY_VAR__); + stan::math::assign(F,make_F(T, F_diag, F_lower, pstream__)); + current_statement_begin__ = 178; + validate_non_negative_index("Y_target", "T", T); + validate_non_negative_index("Y_target", "1", 1); + std::vector > Y_target(1, Eigen::Matrix(T)); + stan::math::initialize(Y_target, DUMMY_VAR__); + stan::math::fill(Y_target, DUMMY_VAR__); + current_statement_begin__ = 179; + validate_non_negative_index("Y_temp", "T", T); + validate_non_negative_index("Y_temp", "j_plus_1", j_plus_1); + std::vector > Y_temp(j_plus_1, Eigen::Matrix(T)); + stan::math::initialize(Y_temp, DUMMY_VAR__); + stan::math::fill(Y_temp, DUMMY_VAR__); + current_statement_begin__ = 180; + stan::model::assign(Y_target, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + append_col(y_std, y_missing), + "assigning variable Y_target"); + current_statement_begin__ = 183; + stan::math::assign(Y_temp, append_array(Y_target, y_donors)); + current_statement_begin__ = 186; + for (int j = 1; j <= j_plus_1; ++j) { + current_statement_begin__ = 187; + lp_accum__.add(normal_id_glm_lpdf(transpose(get_base1(Y_temp, j, "Y_temp", 1)), F, add(b, get_base1(c, j, "c", 1)), stan::model::rvalue(beta, stan::model::cons_list(stan::model::index_omni(), 
stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "beta"), sigma)); + } + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("raw_b"); + names__.push_back("sigma_b"); + names__.push_back("raw_c"); + names__.push_back("sigma_c"); + names__.push_back("beta_off"); + names__.push_back("lambda"); + names__.push_back("eta"); + names__.push_back("tau"); + names__.push_back("y_missing"); + names__.push_back("sigma"); + names__.push_back("F_diag"); + names__.push_back("F_lower"); + names__.push_back("beta"); + names__.push_back("b"); + names__.push_back("c"); + names__.push_back("synth_out"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(T); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(j_plus_1); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(j_plus_1); + dims__.push_back(L); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(L); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(j_plus_1); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(L); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(M); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(L); + dims__.push_back(j_plus_1); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(T); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(j_plus_1); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(T); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_factor_model_without_covariates_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + Eigen::Matrix raw_b = in__.vector_constrain(T); + size_t raw_b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < raw_b_j_1_max__; ++j_1__) { + vars__.push_back(raw_b(j_1__)); + } + double sigma_b = in__.scalar_lb_constrain(0); + vars__.push_back(sigma_b); + Eigen::Matrix raw_c = in__.row_vector_constrain(j_plus_1); + size_t raw_c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < raw_c_j_1_max__; ++j_1__) { + vars__.push_back(raw_c(j_1__)); + } + double 
sigma_c = in__.scalar_lb_constrain(0); + vars__.push_back(sigma_c); + Eigen::Matrix beta_off = in__.matrix_constrain(j_plus_1, L); + size_t beta_off_j_2_max__ = L; + size_t beta_off_j_1_max__ = j_plus_1; + for (size_t j_2__ = 0; j_2__ < beta_off_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_off_j_1_max__; ++j_1__) { + vars__.push_back(beta_off(j_1__, j_2__)); + } + } + Eigen::Matrix lambda = in__.vector_lub_constrain(0, 1, L); + size_t lambda_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + vars__.push_back(lambda(j_1__)); + } + double eta = in__.scalar_lub_constrain(0, 1); + vars__.push_back(eta); + Eigen::Matrix tau = in__.vector_lub_constrain(0, 1, j_plus_1); + size_t tau_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < tau_j_1_max__; ++j_1__) { + vars__.push_back(tau(j_1__)); + } + Eigen::Matrix y_missing = in__.row_vector_constrain(N_pred); + size_t y_missing_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_missing_j_1_max__; ++j_1__) { + vars__.push_back(y_missing(j_1__)); + } + double sigma = in__.scalar_lb_constrain(0); + vars__.push_back(sigma); + Eigen::Matrix F_diag = in__.vector_lb_constrain(0, L); + size_t F_diag_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < F_diag_j_1_max__; ++j_1__) { + vars__.push_back(F_diag(j_1__)); + } + Eigen::Matrix F_lower = in__.vector_constrain(M); + size_t F_lower_j_1_max__ = M; + for (size_t j_1__ = 0; j_1__ < F_lower_j_1_max__; ++j_1__) { + vars__.push_back(F_lower(j_1__)); + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + // declare and define transformed parameters + current_statement_begin__ = 158; + validate_non_negative_index("beta", "L", L); + validate_non_negative_index("beta", "j_plus_1", j_plus_1); + Eigen::Matrix beta(L, j_plus_1); + stan::math::initialize(beta, DUMMY_VAR__); + stan::math::fill(beta, DUMMY_VAR__); + stan::math::assign(beta,make_beta(j_plus_1, beta_off, lambda, eta, tau, pstream__)); + current_statement_begin__ = 163; + validate_non_negative_index("b", "T", T); + Eigen::Matrix b(T); + stan::math::initialize(b, DUMMY_VAR__); + stan::math::fill(b, DUMMY_VAR__); + stan::math::assign(b,multiply(raw_b, sigma_b)); + current_statement_begin__ = 164; + validate_non_negative_index("c", "j_plus_1", j_plus_1); + Eigen::Matrix c(j_plus_1); + stan::math::initialize(c, DUMMY_VAR__); + stan::math::fill(c, DUMMY_VAR__); + stan::math::assign(c,multiply(raw_c, sigma_c)); + if (!include_gqs__ && !include_tparams__) return; + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + // write transformed parameters + if (include_tparams__) { + size_t beta_j_2_max__ = j_plus_1; + size_t beta_j_1_max__ = L; + for (size_t j_2__ = 0; j_2__ < beta_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + vars__.push_back(beta(j_1__, j_2__)); + } + } + size_t b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < b_j_1_max__; ++j_1__) { + vars__.push_back(b(j_1__)); + } + size_t c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < c_j_1_max__; ++j_1__) { + vars__.push_back(c(j_1__)); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 195; + 
validate_non_negative_index("synth_out", "T", T); + Eigen::Matrix synth_out(T); + stan::math::initialize(synth_out, DUMMY_VAR__); + stan::math::fill(synth_out, DUMMY_VAR__); + // generated quantities statements + { + current_statement_begin__ = 197; + validate_non_negative_index("F_", "T", T); + validate_non_negative_index("F_", "L", L); + Eigen::Matrix F_(T, L); + stan::math::initialize(F_, DUMMY_VAR__); + stan::math::fill(F_, DUMMY_VAR__); + stan::math::assign(F_,make_F(T, F_diag, F_lower, pstream__)); + current_statement_begin__ = 198; + validate_non_negative_index("Synth_", "T", T); + validate_non_negative_index("Synth_", "j_plus_1", j_plus_1); + Eigen::Matrix Synth_(T, j_plus_1); + stan::math::initialize(Synth_, DUMMY_VAR__); + stan::math::fill(Synth_, DUMMY_VAR__); + stan::math::assign(Synth_,add(add(multiply(F_, beta), multiply(b, j_ones)), multiply(t_ones, c))); + current_statement_begin__ = 202; + for (int t = 1; t <= T; ++t) { + current_statement_begin__ = 203; + stan::model::assign(synth_out, + stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), + ((normal_rng(get_base1(Synth_, t, 1, "Synth_", 1), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable synth_out"); + } + } + // validate, write generated quantities + current_statement_begin__ = 195; + size_t synth_out_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < synth_out_j_1_max__; ++j_1__) { + vars__.push_back(synth_out(j_1__)); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_factor_model_without_covariates"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t raw_b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < raw_b_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "raw_b" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_b"; + param_names__.push_back(param_name_stream__.str()); + size_t raw_c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < raw_c_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "raw_c" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_c"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_off_j_2_max__ = L; + size_t beta_off_j_1_max__ = j_plus_1; + for (size_t j_2__ = 0; j_2__ < beta_off_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_off_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta_off" << '.' << j_1__ + 1 << '.' << j_2__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t lambda_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "eta"; + param_names__.push_back(param_name_stream__.str()); + size_t tau_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < tau_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_missing_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_missing_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_missing" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t F_diag_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < F_diag_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "F_diag" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t F_lower_j_1_max__ = M; + for (size_t j_1__ = 0; j_1__ < F_lower_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "F_lower" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t beta_j_2_max__ = j_plus_1; + size_t beta_j_1_max__ = L; + for (size_t j_2__ = 0; j_2__ < beta_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1 << '.' << j_2__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < b_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "b" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < c_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "c" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t synth_out_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < synth_out_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "synth_out" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t raw_b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < raw_b_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "raw_b" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_b"; + param_names__.push_back(param_name_stream__.str()); + size_t raw_c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < raw_c_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "raw_c" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma_c"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_off_j_2_max__ = L; + size_t beta_off_j_1_max__ = j_plus_1; + for (size_t j_2__ = 0; j_2__ < beta_off_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_off_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta_off" << '.' << j_1__ + 1 << '.' << j_2__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t lambda_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < lambda_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "eta"; + param_names__.push_back(param_name_stream__.str()); + size_t tau_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < tau_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_missing_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_missing_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_missing" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t F_diag_j_1_max__ = L; + for (size_t j_1__ = 0; j_1__ < F_diag_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "F_diag" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t F_lower_j_1_max__ = M; + for (size_t j_1__ = 0; j_1__ < F_lower_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "F_lower" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t beta_j_2_max__ = j_plus_1; + size_t beta_j_1_max__ = L; + for (size_t j_2__ = 0; j_2__ < beta_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1 << '.' << j_2__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t b_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < b_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "b" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t c_j_1_max__ = j_plus_1; + for (size_t j_1__ = 0; j_1__ < c_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "c" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t synth_out_j_1_max__ = T; + for (size_t j_1__ = 0; j_1__ < synth_out_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "synth_out" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } +}; // model +} // namespace +typedef model_factor_model_without_covariates_namespace::model_factor_model_without_covariates stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_model1.cc b/src/stanExports_model1.cc new file mode 100644 index 0000000..b6519c1 --- /dev/null +++ b/src/stanExports_model1.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_model1.h" + +RCPP_MODULE(stan_fit4model1_mod) { + + + class_ >("model_model1") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_model1.h b/src/stanExports_model1.h new file mode 100644 index 0000000..aa5b2d1 --- /dev/null +++ b/src/stanExports_model1.h @@ -0,0 +1,494 @@ +// Generated by rstantools. Do not edit by hand. 
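The header that follows (stanExports_model1.h) is the C++ Stan 2.21 emitted for the base synthetic-control model: the donor matrix X and the treated series y are column-standardized in transformed data, the donor weights beta live on a simplex, the likelihood is y_std ~ normal(X_std * beta, sigma), and the generated quantities map draws back to the original scale with sd_y and mean_y. A standalone sketch of that standardize, weight, and rescale arithmetic, using made-up data and a fixed weight vector in place of a posterior draw (the generated y_sim and y_pred additionally add normal_rng noise, which is omitted here):

#include <cmath>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  // Illustrative pre-treatment data: N = 4 periods, K = 2 donors.
  std::vector<double> y = {10.0, 12.0, 11.0, 13.0};
  std::vector<std::vector<double>> X = {{9.0, 11.0}, {11.0, 13.0},
                                        {10.0, 12.0}, {12.0, 14.0}};
  std::vector<double> beta = {0.6, 0.4};   // stand-in for one simplex draw
  const int N = static_cast<int>(y.size()), K = static_cast<int>(beta.size());

  auto mean = [](const std::vector<double>& v) {
    return std::accumulate(v.begin(), v.end(), 0.0) / v.size();
  };
  auto sd = [&](const std::vector<double>& v) {    // sample sd, as in Stan's sd()
    double m = mean(v), s = 0.0;
    for (double x : v) s += (x - m) * (x - m);
    return std::sqrt(s / (v.size() - 1));
  };

  const double mean_y = mean(y), sd_y = sd(y);
  // Column-standardize X, form the fitted mean on the standardized scale,
  // then back-transform: y_hat_i = (sum_k X_std[i][k] * beta[k]) * sd_y + mean_y.
  for (int i = 0; i < N; ++i) {
    double mu_std = 0.0;
    for (int k = 0; k < K; ++k) {
      std::vector<double> col(N);
      for (int r = 0; r < N; ++r) col[r] = X[r][k];
      mu_std += ((X[i][k] - mean(col)) / sd(col)) * beta[k];
    }
    std::cout << mu_std * sd_y + mean_y << "\n";
  }
  return 0;
}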
+ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_model1_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_model1"); + reader.add_event(69, 67, "end", "model_model1"); + return reader; +} +#include +class model_model1 + : public stan::model::model_base_crtp { +private: + int N; + vector_d y; + int K; + matrix_d X; + int N_pred; + matrix_d X_pred; + matrix_d X_std; + matrix_d X_pred_std; + vector_d mean_X; + vector_d sd_X; + double mean_y; + double sd_y; + vector_d y_std; +public: + model_model1(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_model1(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_model1_namespace::model_model1"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 21; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 22; + validate_non_negative_index("y", "N", N); + context__.validate_dims("data initialization", "y", "vector_d", context__.to_vec(N)); + y = Eigen::Matrix(N); + vals_r__ = context__.vals_r("y"); + pos__ = 0; + size_t y_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_j_1_max__; ++j_1__) { + y(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 23; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 0); + current_statement_begin__ = 24; + validate_non_negative_index("X", "N", N); + validate_non_negative_index("X", "K", K); + context__.validate_dims("data initialization", "X", "matrix_d", context__.to_vec(N,K)); + X = Eigen::Matrix(N, K); + vals_r__ = context__.vals_r("X"); + pos__ = 0; + size_t X_j_2_max__ = K; + size_t X_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < X_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_j_1_max__; ++j_1__) { + X(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 25; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = context__.vals_i("N_pred"); + pos__ = 
0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 26; + validate_non_negative_index("X_pred", "N_pred", N_pred); + validate_non_negative_index("X_pred", "K", K); + context__.validate_dims("data initialization", "X_pred", "matrix_d", context__.to_vec(N_pred,K)); + X_pred = Eigen::Matrix(N_pred, K); + vals_r__ = context__.vals_r("X_pred"); + pos__ = 0; + size_t X_pred_j_2_max__ = K; + size_t X_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < X_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_pred_j_1_max__; ++j_1__) { + X_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + // initialize transformed data variables + current_statement_begin__ = 30; + validate_non_negative_index("X_std", "N", N); + validate_non_negative_index("X_std", "K", K); + X_std = Eigen::Matrix(N, K); + stan::math::fill(X_std, DUMMY_VAR__); + current_statement_begin__ = 31; + validate_non_negative_index("X_pred_std", "N_pred", N_pred); + validate_non_negative_index("X_pred_std", "K", K); + X_pred_std = Eigen::Matrix(N_pred, K); + stan::math::fill(X_pred_std, DUMMY_VAR__); + current_statement_begin__ = 32; + validate_non_negative_index("mean_X", "K", K); + mean_X = Eigen::Matrix(K); + stan::math::fill(mean_X, DUMMY_VAR__); + current_statement_begin__ = 33; + validate_non_negative_index("sd_X", "K", K); + sd_X = Eigen::Matrix(K); + stan::math::fill(sd_X, DUMMY_VAR__); + current_statement_begin__ = 34; + mean_y = double(0); + stan::math::fill(mean_y, DUMMY_VAR__); + stan::math::assign(mean_y,mean(y)); + current_statement_begin__ = 35; + sd_y = double(0); + stan::math::fill(sd_y, DUMMY_VAR__); + stan::math::assign(sd_y,sd(y)); + current_statement_begin__ = 36; + validate_non_negative_index("y_std", "N", N); + y_std = Eigen::Matrix(N); + stan::math::fill(y_std, DUMMY_VAR__); + stan::math::assign(y_std,divide(subtract(y, mean_y), sd_y)); + // execute transformed data statements + current_statement_begin__ = 38; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 39; + stan::model::assign(mean_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + mean(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable mean_X"); + current_statement_begin__ = 40; + stan::model::assign(sd_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + sd(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable sd_X"); + current_statement_begin__ = 41; + stan::model::assign(X_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_std"); + current_statement_begin__ = 42; + stan::model::assign(X_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), 
stan::model::nil_index_list())), "X_pred"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_pred_std"); + } + // validate transformed data + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 47; + num_params_r__ += 1; + current_statement_begin__ = 48; + validate_non_negative_index("beta", "K", K); + num_params_r__ += (K - 1); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_model1() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 47; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec()); + double sigma(0); + sigma = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 48; + if (!(context__.contains_r("beta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta"); + pos__ = 0U; + validate_non_negative_index("beta", "K", K); + context__.validate_dims("parameter initialization", "beta", "vector_d", context__.to_vec(K)); + Eigen::Matrix beta(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + beta(j_1__) = vals_r__[pos__++]; + } + try { + writer__.simplex_unconstrain(beta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 47; + local_scalar_t__ sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.scalar_lb_constrain(0, lp__); 
+ else + sigma = in__.scalar_lb_constrain(0); + current_statement_begin__ = 48; + Eigen::Matrix beta; + (void) beta; // dummy to suppress unused var warning + if (jacobian__) + beta = in__.simplex_constrain(K, lp__); + else + beta = in__.simplex_constrain(K); + // model body + current_statement_begin__ = 53; + lp_accum__.add(normal_log(sigma, 0, 1)); + current_statement_begin__ = 54; + lp_accum__.add(normal_log(y_std, multiply(X_std, beta), sigma)); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("sigma"); + names__.push_back("beta"); + names__.push_back("y_sim"); + names__.push_back("y_pred"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_model1_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + double sigma = in__.scalar_lb_constrain(0); + vars__.push_back(sigma); + Eigen::Matrix beta = in__.simplex_constrain(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + vars__.push_back(beta(j_1__)); + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + if (!include_gqs__ && !include_tparams__) return; + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 59; + validate_non_negative_index("y_sim", "N", N); + Eigen::Matrix y_sim(N); + stan::math::initialize(y_sim, DUMMY_VAR__); + stan::math::fill(y_sim, DUMMY_VAR__); + current_statement_begin__ = 60; + validate_non_negative_index("y_pred", "N_pred", N_pred); + Eigen::Matrix y_pred(N_pred); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 61; + for (int i = 1; i <= N; ++i) { + current_statement_begin__ = 62; + stan::model::assign(y_sim, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + ((normal_rng(multiply(stan::model::rvalue(X_std, 
stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_std"), beta), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable y_sim"); + } + current_statement_begin__ = 64; + for (int j = 1; j <= N_pred; ++j) { + current_statement_begin__ = 65; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + ((normal_rng(multiply(stan::model::rvalue(X_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_pred_std"), beta), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable y_pred"); + } + // validate, write generated quantities + current_statement_begin__ = 59; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + vars__.push_back(y_sim(j_1__)); + } + current_statement_begin__ = 60; + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + vars__.push_back(y_pred(j_1__)); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_model1"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_j_1_max__ = (K - 1); + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } +}; // model +} // namespace +typedef model_model1_namespace::model_model1 stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_model1_gammaOmega.cc b/src/stanExports_model1_gammaOmega.cc new file mode 100644 index 0000000..a75883a --- /dev/null +++ b/src/stanExports_model1_gammaOmega.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_model1_gammaOmega.h" + +RCPP_MODULE(stan_fit4model1_gammaOmega_mod) { + + + class_ >("model_model1_gammaOmega") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_model1_gammaOmega.h b/src/stanExports_model1_gammaOmega.h new file mode 100644 index 0000000..b8f8955 --- /dev/null +++ b/src/stanExports_model1_gammaOmega.h @@ -0,0 +1,680 @@ +// Generated by rstantools. Do not edit by hand. 
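The header that follows (stanExports_model1_gammaOmega.h) reads its parameters through the same stan::io::reader transforms seen above: scalar_lb_constrain maps an unconstrained value onto sigma > 0, and simplex_constrain maps K-1 unconstrained values onto simplex weights (beta above, w and gamma below), adding the log-Jacobian to lp__ when jacobian__ is true. The sketch below reimplements those two transforms from the standard Stan definitions (treat the exact stick-breaking offsets as an assumption rather than a copy of the rstan code) and then forms the per-predictor scales Omega_k = sigma / gamma_k built in this model's transformed parameters:

#include <cmath>
#include <iostream>
#include <vector>

// Lower-bound transform: x = lb + exp(u); the log-Jacobian contribution is u.
double lb_constrain(double u, double lb, double& lp) {
  lp += u;
  return lb + std::exp(u);
}

// Simplex transform (stick-breaking): K-1 unconstrained values become K positive
// weights that sum to one; the log-Jacobian is accumulated in lp.
std::vector<double> simplex_constrain(const std::vector<double>& u, double& lp) {
  const int K = static_cast<int>(u.size()) + 1;
  std::vector<double> x(K);
  double stick = 1.0;
  for (int k = 0; k < K - 1; ++k) {
    // the log(K - k - 1) offset keeps u = 0 at the uniform simplex
    double z = 1.0 / (1.0 + std::exp(-(u[k] - std::log(double(K - k - 1)))));
    x[k] = stick * z;
    lp += std::log(stick) + std::log(z) + std::log1p(-z);
    stick -= x[k];
  }
  x[K - 1] = stick;
  return x;
}

int main() {
  double lp = 0.0;
  double sigma = lb_constrain(-0.5, 0.0, lp);                      // sigma > 0
  std::vector<double> gamma = simplex_constrain({0.2, -0.1}, lp);  // K = 3 predictors
  // Per-predictor scales: Omega_k = sigma * gamma_k^-1, so a predictor carrying
  // more Dirichlet weight gets a smaller residual scale and is matched more tightly.
  for (double g : gamma) std::cout << "Omega_k = " << sigma * (1.0 / g) << "\n";
  return 0;
}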
+ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_model1_gammaOmega_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_model1_gammaOmega"); + reader.add_event(88, 86, "end", "model_model1_gammaOmega"); + return reader; +} +#include +class model_model1_gammaOmega + : public stan::model::model_base_crtp { +private: + int K; + vector_d X1; + int J; + matrix_d X0; + int T_post; + matrix_d X0_pred; + vector_d vs; + matrix_d X0_std; + matrix_d X0_pred_std; + vector_d mean_X0; + vector_d sd_X0; + double mean_X1; + double sd_X1; + vector_d X1_std; + vector_d vs_std; +public: + model_model1_gammaOmega(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_model1_gammaOmega(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_model1_gammaOmega_namespace::model_model1_gammaOmega"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 22; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 1); + current_statement_begin__ = 23; + validate_non_negative_index("X1", "K", K); + context__.validate_dims("data initialization", "X1", "vector_d", context__.to_vec(K)); + X1 = Eigen::Matrix(K); + vals_r__ = context__.vals_r("X1"); + pos__ = 0; + size_t X1_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < X1_j_1_max__; ++j_1__) { + X1(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 24; + context__.validate_dims("data initialization", "J", "int", context__.to_vec()); + J = int(0); + vals_i__ = context__.vals_i("J"); + pos__ = 0; + J = vals_i__[pos__++]; + check_greater_or_equal(function__, "J", J, 0); + current_statement_begin__ = 25; + validate_non_negative_index("X0", "K", K); + validate_non_negative_index("X0", "J", J); + context__.validate_dims("data initialization", "X0", "matrix_d", context__.to_vec(K,J)); + X0 = Eigen::Matrix(K, J); + vals_r__ = context__.vals_r("X0"); + pos__ = 0; + size_t X0_j_2_max__ = J; + size_t X0_j_1_max__ = K; + for (size_t j_2__ = 0; j_2__ < X0_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X0_j_1_max__; ++j_1__) { + X0(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 26; + 
context__.validate_dims("data initialization", "T_post", "int", context__.to_vec()); + T_post = int(0); + vals_i__ = context__.vals_i("T_post"); + pos__ = 0; + T_post = vals_i__[pos__++]; + check_greater_or_equal(function__, "T_post", T_post, 1); + current_statement_begin__ = 27; + validate_non_negative_index("X0_pred", "T_post", T_post); + validate_non_negative_index("X0_pred", "J", J); + context__.validate_dims("data initialization", "X0_pred", "matrix_d", context__.to_vec(T_post,J)); + X0_pred = Eigen::Matrix(T_post, J); + vals_r__ = context__.vals_r("X0_pred"); + pos__ = 0; + size_t X0_pred_j_2_max__ = J; + size_t X0_pred_j_1_max__ = T_post; + for (size_t j_2__ = 0; j_2__ < X0_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X0_pred_j_1_max__; ++j_1__) { + X0_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 28; + validate_non_negative_index("vs", "K", K); + context__.validate_dims("data initialization", "vs", "vector_d", context__.to_vec(K)); + vs = Eigen::Matrix(K); + vals_r__ = context__.vals_r("vs"); + pos__ = 0; + size_t vs_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < vs_j_1_max__; ++j_1__) { + vs(j_1__) = vals_r__[pos__++]; + } + check_greater_or_equal(function__, "vs", vs, 0); + // initialize transformed data variables + current_statement_begin__ = 33; + validate_non_negative_index("X0_std", "K", K); + validate_non_negative_index("X0_std", "J", J); + X0_std = Eigen::Matrix(K, J); + stan::math::fill(X0_std, DUMMY_VAR__); + current_statement_begin__ = 34; + validate_non_negative_index("X0_pred_std", "T_post", T_post); + validate_non_negative_index("X0_pred_std", "J", J); + X0_pred_std = Eigen::Matrix(T_post, J); + stan::math::fill(X0_pred_std, DUMMY_VAR__); + current_statement_begin__ = 35; + validate_non_negative_index("mean_X0", "J", J); + mean_X0 = Eigen::Matrix(J); + stan::math::fill(mean_X0, DUMMY_VAR__); + current_statement_begin__ = 36; + validate_non_negative_index("sd_X0", "J", J); + sd_X0 = Eigen::Matrix(J); + stan::math::fill(sd_X0, DUMMY_VAR__); + current_statement_begin__ = 37; + mean_X1 = double(0); + stan::math::fill(mean_X1, DUMMY_VAR__); + stan::math::assign(mean_X1,mean(X1)); + current_statement_begin__ = 38; + sd_X1 = double(0); + stan::math::fill(sd_X1, DUMMY_VAR__); + stan::math::assign(sd_X1,sd(X1)); + current_statement_begin__ = 39; + validate_non_negative_index("X1_std", "K", K); + X1_std = Eigen::Matrix(K); + stan::math::fill(X1_std, DUMMY_VAR__); + stan::math::assign(X1_std,divide(subtract(X1, mean_X1), sd_X1)); + current_statement_begin__ = 40; + validate_non_negative_index("vs_std", "K", K); + vs_std = Eigen::Matrix(K); + stan::math::fill(vs_std, DUMMY_VAR__); + // execute transformed data statements + current_statement_begin__ = 41; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 42; + stan::model::assign(vs_std, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + pow(sd(stan::model::rvalue(X0, stan::model::cons_list(stan::model::index_uni(k), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X0")), -(1)), + "assigning variable vs_std"); + } + current_statement_begin__ = 44; + for (int j = 1; j <= J; ++j) { + current_statement_begin__ = 45; + stan::model::assign(mean_X0, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + mean(stan::model::rvalue(X0, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "X0")), + 
"assigning variable mean_X0"); + current_statement_begin__ = 46; + stan::model::assign(sd_X0, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + sd(stan::model::rvalue(X0, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "X0")), + "assigning variable sd_X0"); + current_statement_begin__ = 47; + stan::model::assign(X0_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X0, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "X0"), get_base1(mean_X0, j, "mean_X0", 1)), get_base1(sd_X0, j, "sd_X0", 1)), + "assigning variable X0_std"); + current_statement_begin__ = 48; + stan::model::assign(X0_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X0_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "X0_pred"), get_base1(mean_X0, j, "mean_X0", 1)), get_base1(sd_X0, j, "sd_X0", 1)), + "assigning variable X0_pred_std"); + } + // validate transformed data + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 53; + num_params_r__ += 1; + current_statement_begin__ = 54; + validate_non_negative_index("w", "J", J); + num_params_r__ += (J - 1); + current_statement_begin__ = 55; + validate_non_negative_index("gamma", "K", K); + num_params_r__ += (K - 1); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_model1_gammaOmega() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 53; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec()); + double sigma(0); + sigma = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 54; + if (!(context__.contains_r("w"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable w missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("w"); + pos__ = 0U; + validate_non_negative_index("w", "J", J); + context__.validate_dims("parameter initialization", "w", "vector_d", context__.to_vec(J)); + Eigen::Matrix w(J); + size_t w_j_1_max__ = J; + for (size_t j_1__ = 0; j_1__ < w_j_1_max__; ++j_1__) 
{ + w(j_1__) = vals_r__[pos__++]; + } + try { + writer__.simplex_unconstrain(w); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable w: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 55; + if (!(context__.contains_r("gamma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable gamma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("gamma"); + pos__ = 0U; + validate_non_negative_index("gamma", "K", K); + context__.validate_dims("parameter initialization", "gamma", "vector_d", context__.to_vec(K)); + Eigen::Matrix gamma(K); + size_t gamma_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + gamma(j_1__) = vals_r__[pos__++]; + } + try { + writer__.simplex_unconstrain(gamma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable gamma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 53; + local_scalar_t__ sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.scalar_lb_constrain(0, lp__); + else + sigma = in__.scalar_lb_constrain(0); + current_statement_begin__ = 54; + Eigen::Matrix w; + (void) w; // dummy to suppress unused var warning + if (jacobian__) + w = in__.simplex_constrain(J, lp__); + else + w = in__.simplex_constrain(J); + current_statement_begin__ = 55; + Eigen::Matrix gamma; + (void) gamma; // dummy to suppress unused var warning + if (jacobian__) + gamma = in__.simplex_constrain(K, lp__); + else + gamma = in__.simplex_constrain(K); + // transformed parameters + current_statement_begin__ = 61; + validate_non_negative_index("Omega", "K", K); + Eigen::Matrix Omega(K); + stan::math::initialize(Omega, DUMMY_VAR__); + stan::math::fill(Omega, DUMMY_VAR__); + current_statement_begin__ = 62; + validate_non_negative_index("Gamma", "K", K); + Eigen::Matrix Gamma(K); + stan::math::initialize(Gamma, DUMMY_VAR__); + stan::math::fill(Gamma, DUMMY_VAR__); + // transformed parameters block statements + current_statement_begin__ = 63; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 64; + stan::model::assign(Gamma, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + pow(get_base1(gamma, k, "gamma", 1), -(1)), + "assigning variable Gamma"); + } + current_statement_begin__ = 66; + stan::math::assign(Omega, multiply(sigma, Gamma)); + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to 
suppress unused var warning + current_statement_begin__ = 61; + size_t Omega_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < Omega_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(Omega(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: Omega" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable Omega: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 62; + size_t Gamma_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < Gamma_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(Gamma(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: Gamma" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable Gamma: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + // model body + current_statement_begin__ = 71; + lp_accum__.add(normal_log(sigma, 0, 1)); + current_statement_begin__ = 72; + lp_accum__.add(dirichlet_log(gamma, vs_std)); + current_statement_begin__ = 73; + lp_accum__.add(normal_log(X1_std, multiply(X0_std, w), Omega)); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("sigma"); + names__.push_back("w"); + names__.push_back("gamma"); + names__.push_back("Omega"); + names__.push_back("Gamma"); + names__.push_back("X1_sim"); + names__.push_back("X1_pred"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(J); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(T_post); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_model1_gammaOmega_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + double sigma = in__.scalar_lb_constrain(0); + vars__.push_back(sigma); + Eigen::Matrix w = in__.simplex_constrain(J); + size_t w_j_1_max__ = J; + for (size_t j_1__ = 0; j_1__ < w_j_1_max__; ++j_1__) { + vars__.push_back(w(j_1__)); + } + Eigen::Matrix gamma = in__.simplex_constrain(K); + size_t gamma_j_1_max__ = K; + for 
(size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + vars__.push_back(gamma(j_1__)); + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + // declare and define transformed parameters + current_statement_begin__ = 61; + validate_non_negative_index("Omega", "K", K); + Eigen::Matrix Omega(K); + stan::math::initialize(Omega, DUMMY_VAR__); + stan::math::fill(Omega, DUMMY_VAR__); + current_statement_begin__ = 62; + validate_non_negative_index("Gamma", "K", K); + Eigen::Matrix Gamma(K); + stan::math::initialize(Gamma, DUMMY_VAR__); + stan::math::fill(Gamma, DUMMY_VAR__); + // do transformed parameters statements + current_statement_begin__ = 63; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 64; + stan::model::assign(Gamma, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + pow(get_base1(gamma, k, "gamma", 1), -(1)), + "assigning variable Gamma"); + } + current_statement_begin__ = 66; + stan::math::assign(Omega, multiply(sigma, Gamma)); + if (!include_gqs__ && !include_tparams__) return; + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + // write transformed parameters + if (include_tparams__) { + size_t Omega_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < Omega_j_1_max__; ++j_1__) { + vars__.push_back(Omega(j_1__)); + } + size_t Gamma_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < Gamma_j_1_max__; ++j_1__) { + vars__.push_back(Gamma(j_1__)); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 77; + validate_non_negative_index("X1_sim", "K", K); + Eigen::Matrix X1_sim(K); + stan::math::initialize(X1_sim, DUMMY_VAR__); + stan::math::fill(X1_sim, DUMMY_VAR__); + current_statement_begin__ = 78; + validate_non_negative_index("X1_pred", "T_post", T_post); + Eigen::Matrix X1_pred(T_post); + stan::math::initialize(X1_pred, DUMMY_VAR__); + stan::math::fill(X1_pred, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 79; + for (int i = 1; i <= K; ++i) { + current_statement_begin__ = 80; + stan::model::assign(X1_sim, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + ((normal_rng(multiply(stan::model::rvalue(X0_std, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X0_std"), w), sigma, base_rng__) * sd_X1) + mean_X1), + "assigning variable X1_sim"); + } + current_statement_begin__ = 83; + for (int j = 1; j <= T_post; ++j) { + current_statement_begin__ = 84; + stan::model::assign(X1_pred, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + ((normal_rng(multiply(stan::model::rvalue(X0_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X0_pred_std"), w), sigma, base_rng__) * sd_X1) + mean_X1), + "assigning variable X1_pred"); + } + // validate, write generated quantities + current_statement_begin__ = 77; + size_t X1_sim_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < X1_sim_j_1_max__; ++j_1__) { + vars__.push_back(X1_sim(j_1__)); + } + current_statement_begin__ = 78; + size_t 
X1_pred_j_1_max__ = T_post; + for (size_t j_1__ = 0; j_1__ < X1_pred_j_1_max__; ++j_1__) { + vars__.push_back(X1_pred(j_1__)); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_model1_gammaOmega"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t w_j_1_max__ = J; + for (size_t j_1__ = 0; j_1__ < w_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t gamma_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t Omega_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < Omega_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "Omega" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t Gamma_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < Gamma_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "Gamma" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t X1_sim_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < X1_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "X1_sim" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t X1_pred_j_1_max__ = T_post; + for (size_t j_1__ = 0; j_1__ < X1_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "X1_pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t w_j_1_max__ = (J - 1); + for (size_t j_1__ = 0; j_1__ < w_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t gamma_j_1_max__ = (K - 1); + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t Omega_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < Omega_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "Omega" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t Gamma_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < Gamma_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "Gamma" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t X1_sim_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < X1_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "X1_sim" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t X1_pred_j_1_max__ = T_post; + for (size_t j_1__ = 0; j_1__ < X1_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "X1_pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } +}; // model +} // namespace +typedef model_model1_gammaOmega_namespace::model_model1_gammaOmega stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_model2.cc b/src/stanExports_model2.cc new file mode 100644 index 0000000..c2c4cfe --- /dev/null +++ b/src/stanExports_model2.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_model2.h" + +RCPP_MODULE(stan_fit4model2_mod) { + + + class_ >("model_model2") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_model2.h b/src/stanExports_model2.h new file mode 100644 index 0000000..234bb02 --- /dev/null +++ b/src/stanExports_model2.h @@ -0,0 +1,722 @@ +// Generated by rstantools. Do not edit by hand. 
+ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_model2_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_model2"); + reader.add_event(90, 88, "end", "model_model2"); + return reader; +} +#include +class model_model2 + : public stan::model::model_base_crtp { +private: + int N; + vector_d y; + int K; + matrix_d X; + int N_pred; + matrix_d X_pred; + matrix_d X_std; + matrix_d X_pred_std; + vector_d mean_X; + vector_d sd_X; + double mean_y; + double sd_y; + std::vector time; + vector_d y_std; + int sumN; +public: + model_model2(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_model2(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_model2_namespace::model_model2"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 20; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 21; + validate_non_negative_index("y", "N", N); + context__.validate_dims("data initialization", "y", "vector_d", context__.to_vec(N)); + y = Eigen::Matrix(N); + vals_r__ = context__.vals_r("y"); + pos__ = 0; + size_t y_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_j_1_max__; ++j_1__) { + y(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 22; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 0); + current_statement_begin__ = 23; + validate_non_negative_index("X", "N", N); + validate_non_negative_index("X", "K", K); + context__.validate_dims("data initialization", "X", "matrix_d", context__.to_vec(N,K)); + X = Eigen::Matrix(N, K); + vals_r__ = context__.vals_r("X"); + pos__ = 0; + size_t X_j_2_max__ = K; + size_t X_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < X_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_j_1_max__; ++j_1__) { + X(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 24; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = 
context__.vals_i("N_pred"); + pos__ = 0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 25; + validate_non_negative_index("X_pred", "N_pred", N_pred); + validate_non_negative_index("X_pred", "K", K); + context__.validate_dims("data initialization", "X_pred", "matrix_d", context__.to_vec(N_pred,K)); + X_pred = Eigen::Matrix(N_pred, K); + vals_r__ = context__.vals_r("X_pred"); + pos__ = 0; + size_t X_pred_j_2_max__ = K; + size_t X_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < X_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_pred_j_1_max__; ++j_1__) { + X_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + // initialize transformed data variables + current_statement_begin__ = 29; + validate_non_negative_index("X_std", "N", N); + validate_non_negative_index("X_std", "K", K); + X_std = Eigen::Matrix(N, K); + stan::math::fill(X_std, DUMMY_VAR__); + current_statement_begin__ = 30; + validate_non_negative_index("X_pred_std", "N_pred", N_pred); + validate_non_negative_index("X_pred_std", "K", K); + X_pred_std = Eigen::Matrix(N_pred, K); + stan::math::fill(X_pred_std, DUMMY_VAR__); + current_statement_begin__ = 31; + validate_non_negative_index("mean_X", "K", K); + mean_X = Eigen::Matrix(K); + stan::math::fill(mean_X, DUMMY_VAR__); + current_statement_begin__ = 32; + validate_non_negative_index("sd_X", "K", K); + sd_X = Eigen::Matrix(K); + stan::math::fill(sd_X, DUMMY_VAR__); + current_statement_begin__ = 33; + mean_y = double(0); + stan::math::fill(mean_y, DUMMY_VAR__); + stan::math::assign(mean_y,mean(y)); + current_statement_begin__ = 34; + sd_y = double(0); + stan::math::fill(sd_y, DUMMY_VAR__); + stan::math::assign(sd_y,sd(y)); + current_statement_begin__ = 35; + validate_non_negative_index("time", "(N + N_pred)", (N + N_pred)); + time = std::vector((N + N_pred), double(0)); + stan::math::fill(time, DUMMY_VAR__); + current_statement_begin__ = 36; + validate_non_negative_index("y_std", "N", N); + y_std = Eigen::Matrix(N); + stan::math::fill(y_std, DUMMY_VAR__); + stan::math::assign(y_std,divide(subtract(y, mean_y), sd_y)); + current_statement_begin__ = 37; + sumN = int(0); + stan::math::fill(sumN, std::numeric_limits::min()); + stan::math::assign(sumN,(N + N_pred)); + // execute transformed data statements + current_statement_begin__ = 39; + for (int t = 1; t <= sumN; ++t) { + current_statement_begin__ = 40; + stan::model::assign(time, + stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), + t, + "assigning variable time"); + } + current_statement_begin__ = 43; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 44; + stan::model::assign(mean_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + mean(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable mean_X"); + current_statement_begin__ = 45; + stan::model::assign(sd_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + sd(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable sd_X"); + current_statement_begin__ = 46; + stan::model::assign(X_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + 
divide(subtract(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_std"); + current_statement_begin__ = 47; + stan::model::assign(X_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X_pred"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_pred_std"); + } + // validate transformed data + current_statement_begin__ = 37; + check_greater_or_equal(function__, "sumN", sumN, 1); + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 52; + num_params_r__ += 1; + current_statement_begin__ = 53; + validate_non_negative_index("beta", "K", K); + num_params_r__ += (K - 1); + current_statement_begin__ = 54; + num_params_r__ += 1; + current_statement_begin__ = 55; + num_params_r__ += 1; + current_statement_begin__ = 56; + validate_non_negative_index("eta", "sumN", sumN); + num_params_r__ += sumN; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_model2() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 52; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec()); + double sigma(0); + sigma = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 53; + if (!(context__.contains_r("beta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta"); + pos__ = 0U; + validate_non_negative_index("beta", "K", K); + context__.validate_dims("parameter initialization", "beta", "vector_d", context__.to_vec(K)); + Eigen::Matrix beta(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + beta(j_1__) = vals_r__[pos__++]; + } + try { + writer__.simplex_unconstrain(beta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 54; + if (!(context__.contains_r("rho"))) + 
stan::lang::rethrow_located(std::runtime_error(std::string("Variable rho missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("rho"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "rho", "double", context__.to_vec()); + double rho(0); + rho = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, rho); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable rho: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 55; + if (!(context__.contains_r("alpha"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable alpha missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("alpha"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "alpha", "double", context__.to_vec()); + double alpha(0); + alpha = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, alpha); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable alpha: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 56; + if (!(context__.contains_r("eta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable eta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("eta"); + pos__ = 0U; + validate_non_negative_index("eta", "sumN", sumN); + context__.validate_dims("parameter initialization", "eta", "vector_d", context__.to_vec(sumN)); + Eigen::Matrix eta(sumN); + size_t eta_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + eta(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(eta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable eta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 52; + local_scalar_t__ sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.scalar_lb_constrain(0, lp__); + else + sigma = in__.scalar_lb_constrain(0); + current_statement_begin__ = 53; + Eigen::Matrix beta; + (void) beta; // dummy to suppress unused var warning + if (jacobian__) + beta = in__.simplex_constrain(K, lp__); + else + beta = in__.simplex_constrain(K); + current_statement_begin__ = 54; + local_scalar_t__ rho; + (void) rho; // dummy to suppress unused var warning + if (jacobian__) + rho = in__.scalar_lb_constrain(0, lp__); + else + rho = in__.scalar_lb_constrain(0); + 
current_statement_begin__ = 55; + local_scalar_t__ alpha; + (void) alpha; // dummy to suppress unused var warning + if (jacobian__) + alpha = in__.scalar_lb_constrain(0, lp__); + else + alpha = in__.scalar_lb_constrain(0); + current_statement_begin__ = 56; + Eigen::Matrix eta; + (void) eta; // dummy to suppress unused var warning + if (jacobian__) + eta = in__.vector_constrain(sumN, lp__); + else + eta = in__.vector_constrain(sumN); + // transformed parameters + current_statement_begin__ = 60; + validate_non_negative_index("f", "sumN", sumN); + Eigen::Matrix f(sumN); + stan::math::initialize(f, DUMMY_VAR__); + stan::math::fill(f, DUMMY_VAR__); + // transformed parameters block statements + { + current_statement_begin__ = 62; + validate_non_negative_index("K_matrix", "sumN", sumN); + validate_non_negative_index("K_matrix", "sumN", sumN); + Eigen::Matrix K_matrix(sumN, sumN); + stan::math::initialize(K_matrix, DUMMY_VAR__); + stan::math::fill(K_matrix, DUMMY_VAR__); + stan::math::assign(K_matrix,add(cov_exp_quad(time, alpha, rho), diag_matrix(rep_vector(1e-9, sumN)))); + current_statement_begin__ = 64; + validate_non_negative_index("L_K", "sumN", sumN); + validate_non_negative_index("L_K", "sumN", sumN); + Eigen::Matrix L_K(sumN, sumN); + stan::math::initialize(L_K, DUMMY_VAR__); + stan::math::fill(L_K, DUMMY_VAR__); + stan::math::assign(L_K,cholesky_decompose(K_matrix)); + current_statement_begin__ = 65; + stan::math::assign(f, multiply(L_K, eta)); + } + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 60; + size_t f_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(f(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: f" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable f: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + // model body + current_statement_begin__ = 71; + lp_accum__.add(normal_log(rho, 0, 3)); + current_statement_begin__ = 72; + lp_accum__.add(normal_log(alpha, 0, 1)); + current_statement_begin__ = 73; + lp_accum__.add(normal_log(sigma, 0, 1)); + current_statement_begin__ = 74; + lp_accum__.add(normal_log(eta, 0, 1)); + current_statement_begin__ = 75; + lp_accum__.add(normal_log(y_std, add(multiply(X_std, beta), stan::model::rvalue(f, stan::model::cons_list(stan::model::index_min_max(1, N), stan::model::nil_index_list()), "f")), sigma)); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("sigma"); + names__.push_back("beta"); + names__.push_back("rho"); + names__.push_back("alpha"); + names__.push_back("eta"); + names__.push_back("f"); + names__.push_back("y_sim"); + 
names__.push_back("y_pred"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(sumN); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(sumN); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_model2_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + double sigma = in__.scalar_lb_constrain(0); + vars__.push_back(sigma); + Eigen::Matrix beta = in__.simplex_constrain(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + vars__.push_back(beta(j_1__)); + } + double rho = in__.scalar_lb_constrain(0); + vars__.push_back(rho); + double alpha = in__.scalar_lb_constrain(0); + vars__.push_back(alpha); + Eigen::Matrix eta = in__.vector_constrain(sumN); + size_t eta_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + vars__.push_back(eta(j_1__)); + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + // declare and define transformed parameters + current_statement_begin__ = 60; + validate_non_negative_index("f", "sumN", sumN); + Eigen::Matrix f(sumN); + stan::math::initialize(f, DUMMY_VAR__); + stan::math::fill(f, DUMMY_VAR__); + // do transformed parameters statements + { + current_statement_begin__ = 62; + validate_non_negative_index("K_matrix", "sumN", sumN); + validate_non_negative_index("K_matrix", "sumN", sumN); + Eigen::Matrix K_matrix(sumN, sumN); + stan::math::initialize(K_matrix, DUMMY_VAR__); + stan::math::fill(K_matrix, DUMMY_VAR__); + stan::math::assign(K_matrix,add(cov_exp_quad(time, alpha, rho), diag_matrix(rep_vector(1e-9, sumN)))); + current_statement_begin__ = 64; + validate_non_negative_index("L_K", "sumN", sumN); + validate_non_negative_index("L_K", "sumN", sumN); + Eigen::Matrix L_K(sumN, sumN); + stan::math::initialize(L_K, DUMMY_VAR__); + stan::math::fill(L_K, DUMMY_VAR__); + stan::math::assign(L_K,cholesky_decompose(K_matrix)); + current_statement_begin__ = 65; + stan::math::assign(f, multiply(L_K, eta)); + } + if (!include_gqs__ && !include_tparams__) return; + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + // write transformed parameters + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + vars__.push_back(f(j_1__)); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + 
current_statement_begin__ = 80; + validate_non_negative_index("y_sim", "N", N); + Eigen::Matrix y_sim(N); + stan::math::initialize(y_sim, DUMMY_VAR__); + stan::math::fill(y_sim, DUMMY_VAR__); + current_statement_begin__ = 81; + validate_non_negative_index("y_pred", "N_pred", N_pred); + Eigen::Matrix y_pred(N_pred); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 82; + for (int i = 1; i <= N; ++i) { + current_statement_begin__ = 83; + stan::model::assign(y_sim, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + ((normal_rng((multiply(stan::model::rvalue(X_std, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_std"), beta) + get_base1(f, i, "f", 1)), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable y_sim"); + } + current_statement_begin__ = 85; + for (int j = 1; j <= N_pred; ++j) { + current_statement_begin__ = 86; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + ((normal_rng((multiply(stan::model::rvalue(X_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_pred_std"), beta) + get_base1(f, (N + j), "f", 1)), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable y_pred"); + } + // validate, write generated quantities + current_statement_begin__ = 80; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + vars__.push_back(y_sim(j_1__)); + } + current_statement_begin__ = 81; + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + vars__.push_back(y_pred(j_1__)); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_model2"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "rho"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha"; + param_names__.push_back(param_name_stream__.str()); + size_t eta_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "eta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "f" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_j_1_max__ = (K - 1); + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "rho"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha"; + param_names__.push_back(param_name_stream__.str()); + size_t eta_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "eta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "f" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } +}; // model +} // namespace +typedef model_model2_namespace::model_model2 stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_model3.cc b/src/stanExports_model3.cc new file mode 100644 index 0000000..fbf8e8c --- /dev/null +++ b/src/stanExports_model3.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include <Rcpp.h> +using namespace Rcpp ; +#include "stanExports_model3.h" + +RCPP_MODULE(stan_fit4model3_mod) { + + + class_<rstan::stan_fit<stan_model, boost::random::ecuyer1988> >("model_model3") + + .constructor<SEXP, SEXP, SEXP>() + + + .method("call_sampler", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::call_sampler) + .method("param_names", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_names) + .method("param_names_oi", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_dims) + .method("param_dims_oi", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::grad_log_prob) + .method("log_prob", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::standalone_gqs) + ; +} diff --git a/src/stanExports_model3.h b/src/stanExports_model3.h new file mode 100644 index 0000000..7a3d1da --- /dev/null +++ b/src/stanExports_model3.h @@ -0,0 +1,634 @@ +// Generated by rstantools. Do not edit by hand.
+ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_model3_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_model3"); + reader.add_event(108, 106, "end", "model_model3"); + return reader; +} +#include +class model_model3 + : public stan::model::model_base_crtp { +private: + int N; + vector_d y; + int K; + matrix_d X; + int M_K; + matrix_d M; + int N_pred; + matrix_d X_pred; + matrix_d M_pred; + matrix_d X_std; + matrix_d X_pred_std; + vector_d mean_X; + vector_d sd_X; + matrix_d M_std; + matrix_d M_pred_std; + vector_d mean_M; + vector_d sd_M; + double mean_y; + double sd_y; + vector_d y_std; + int sumN; +public: + model_model3(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_model3(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_model3_namespace::model_model3"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 21; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 22; + validate_non_negative_index("y", "N", N); + context__.validate_dims("data initialization", "y", "vector_d", context__.to_vec(N)); + y = Eigen::Matrix(N); + vals_r__ = context__.vals_r("y"); + pos__ = 0; + size_t y_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_j_1_max__; ++j_1__) { + y(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 23; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 0); + current_statement_begin__ = 24; + validate_non_negative_index("X", "N", N); + validate_non_negative_index("X", "K", K); + context__.validate_dims("data initialization", "X", "matrix_d", context__.to_vec(N,K)); + X = Eigen::Matrix(N, K); + vals_r__ = context__.vals_r("X"); + pos__ = 0; + size_t X_j_2_max__ = K; + size_t X_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < X_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_j_1_max__; ++j_1__) { + X(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 25; + 
context__.validate_dims("data initialization", "M_K", "int", context__.to_vec()); + M_K = int(0); + vals_i__ = context__.vals_i("M_K"); + pos__ = 0; + M_K = vals_i__[pos__++]; + check_greater_or_equal(function__, "M_K", M_K, 0); + current_statement_begin__ = 26; + validate_non_negative_index("M", "N", N); + validate_non_negative_index("M", "M_K", M_K); + context__.validate_dims("data initialization", "M", "matrix_d", context__.to_vec(N,M_K)); + M = Eigen::Matrix(N, M_K); + vals_r__ = context__.vals_r("M"); + pos__ = 0; + size_t M_j_2_max__ = M_K; + size_t M_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < M_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < M_j_1_max__; ++j_1__) { + M(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 27; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = context__.vals_i("N_pred"); + pos__ = 0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 28; + validate_non_negative_index("X_pred", "N_pred", N_pred); + validate_non_negative_index("X_pred", "K", K); + context__.validate_dims("data initialization", "X_pred", "matrix_d", context__.to_vec(N_pred,K)); + X_pred = Eigen::Matrix(N_pred, K); + vals_r__ = context__.vals_r("X_pred"); + pos__ = 0; + size_t X_pred_j_2_max__ = K; + size_t X_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < X_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_pred_j_1_max__; ++j_1__) { + X_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 29; + validate_non_negative_index("M_pred", "N_pred", N_pred); + validate_non_negative_index("M_pred", "M_K", M_K); + context__.validate_dims("data initialization", "M_pred", "matrix_d", context__.to_vec(N_pred,M_K)); + M_pred = Eigen::Matrix(N_pred, M_K); + vals_r__ = context__.vals_r("M_pred"); + pos__ = 0; + size_t M_pred_j_2_max__ = M_K; + size_t M_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < M_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < M_pred_j_1_max__; ++j_1__) { + M_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + // initialize transformed data variables + current_statement_begin__ = 33; + validate_non_negative_index("X_std", "N", N); + validate_non_negative_index("X_std", "K", K); + X_std = Eigen::Matrix(N, K); + stan::math::fill(X_std, DUMMY_VAR__); + current_statement_begin__ = 34; + validate_non_negative_index("X_pred_std", "N_pred", N_pred); + validate_non_negative_index("X_pred_std", "K", K); + X_pred_std = Eigen::Matrix(N_pred, K); + stan::math::fill(X_pred_std, DUMMY_VAR__); + current_statement_begin__ = 35; + validate_non_negative_index("mean_X", "K", K); + mean_X = Eigen::Matrix(K); + stan::math::fill(mean_X, DUMMY_VAR__); + current_statement_begin__ = 36; + validate_non_negative_index("sd_X", "K", K); + sd_X = Eigen::Matrix(K); + stan::math::fill(sd_X, DUMMY_VAR__); + current_statement_begin__ = 37; + validate_non_negative_index("M_std", "N", N); + validate_non_negative_index("M_std", "M_K", M_K); + M_std = Eigen::Matrix(N, M_K); + stan::math::fill(M_std, DUMMY_VAR__); + current_statement_begin__ = 38; + validate_non_negative_index("M_pred_std", "N_pred", N_pred); + validate_non_negative_index("M_pred_std", "M_K", M_K); + M_pred_std = Eigen::Matrix(N_pred, M_K); + stan::math::fill(M_pred_std, DUMMY_VAR__); + current_statement_begin__ = 39; + validate_non_negative_index("mean_M", "M_K", M_K); + mean_M = Eigen::Matrix(M_K); + 
stan::math::fill(mean_M, DUMMY_VAR__); + current_statement_begin__ = 40; + validate_non_negative_index("sd_M", "M_K", M_K); + sd_M = Eigen::Matrix(M_K); + stan::math::fill(sd_M, DUMMY_VAR__); + current_statement_begin__ = 41; + mean_y = double(0); + stan::math::fill(mean_y, DUMMY_VAR__); + stan::math::assign(mean_y,mean(y)); + current_statement_begin__ = 42; + sd_y = double(0); + stan::math::fill(sd_y, DUMMY_VAR__); + stan::math::assign(sd_y,sd(y)); + current_statement_begin__ = 44; + validate_non_negative_index("y_std", "N", N); + y_std = Eigen::Matrix(N); + stan::math::fill(y_std, DUMMY_VAR__); + stan::math::assign(y_std,divide(subtract(y, mean_y), sd_y)); + current_statement_begin__ = 45; + sumN = int(0); + stan::math::fill(sumN, std::numeric_limits::min()); + stan::math::assign(sumN,(N + N_pred)); + // execute transformed data statements + current_statement_begin__ = 51; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 52; + stan::model::assign(mean_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + mean(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable mean_X"); + current_statement_begin__ = 53; + stan::model::assign(sd_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + sd(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable sd_X"); + current_statement_begin__ = 54; + stan::model::assign(X_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_std"); + current_statement_begin__ = 55; + stan::model::assign(X_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X_pred"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_pred_std"); + } + current_statement_begin__ = 58; + for (int j = 1; j <= M_K; ++j) { + current_statement_begin__ = 59; + stan::model::assign(mean_M, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + mean(stan::model::rvalue(M, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M")), + "assigning variable mean_M"); + current_statement_begin__ = 60; + stan::model::assign(sd_M, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + sd(stan::model::rvalue(M, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M")), + "assigning variable sd_M"); + current_statement_begin__ = 61; + stan::model::assign(M_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), 
+ divide(subtract(stan::model::rvalue(M, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M"), get_base1(mean_M, j, "mean_M", 1)), get_base1(sd_M, j, "sd_M", 1)), + "assigning variable M_std"); + current_statement_begin__ = 62; + stan::model::assign(M_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(M_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M_pred"), get_base1(mean_M, j, "mean_M", 1)), get_base1(sd_M, j, "sd_M", 1)), + "assigning variable M_pred_std"); + } + // validate transformed data + current_statement_begin__ = 45; + check_greater_or_equal(function__, "sumN", sumN, 1); + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 68; + num_params_r__ += 1; + current_statement_begin__ = 69; + validate_non_negative_index("beta", "K", K); + num_params_r__ += (K - 1); + current_statement_begin__ = 73; + validate_non_negative_index("gamma", "M_K", M_K); + num_params_r__ += M_K; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_model3() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 68; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec()); + double sigma(0); + sigma = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 69; + if (!(context__.contains_r("beta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta"); + pos__ = 0U; + validate_non_negative_index("beta", "K", K); + context__.validate_dims("parameter initialization", "beta", "vector_d", context__.to_vec(K)); + Eigen::Matrix beta(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + beta(j_1__) = vals_r__[pos__++]; + } + try { + writer__.simplex_unconstrain(beta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 73; + if (!(context__.contains_r("gamma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable gamma missing")), current_statement_begin__, 
prog_reader__()); + vals_r__ = context__.vals_r("gamma"); + pos__ = 0U; + validate_non_negative_index("gamma", "M_K", M_K); + context__.validate_dims("parameter initialization", "gamma", "vector_d", context__.to_vec(M_K)); + Eigen::Matrix gamma(M_K); + size_t gamma_j_1_max__ = M_K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + gamma(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(gamma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable gamma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 68; + local_scalar_t__ sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.scalar_lb_constrain(0, lp__); + else + sigma = in__.scalar_lb_constrain(0); + current_statement_begin__ = 69; + Eigen::Matrix beta; + (void) beta; // dummy to suppress unused var warning + if (jacobian__) + beta = in__.simplex_constrain(K, lp__); + else + beta = in__.simplex_constrain(K); + current_statement_begin__ = 73; + Eigen::Matrix gamma; + (void) gamma; // dummy to suppress unused var warning + if (jacobian__) + gamma = in__.vector_constrain(M_K, lp__); + else + gamma = in__.vector_constrain(M_K); + // model body + current_statement_begin__ = 90; + lp_accum__.add(normal_log(sigma, 0, 1)); + current_statement_begin__ = 92; + lp_accum__.add(normal_log(gamma, 0, 1)); + current_statement_begin__ = 93; + lp_accum__.add(normal_log(y_std, add(multiply(X_std, beta), multiply(M_std, gamma)), sigma)); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("sigma"); + names__.push_back("beta"); + names__.push_back("gamma"); + names__.push_back("y_sim"); + names__.push_back("y_pred"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + 
dims__.push_back(M_K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_model3_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + double sigma = in__.scalar_lb_constrain(0); + vars__.push_back(sigma); + Eigen::Matrix beta = in__.simplex_constrain(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + vars__.push_back(beta(j_1__)); + } + Eigen::Matrix gamma = in__.vector_constrain(M_K); + size_t gamma_j_1_max__ = M_K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + vars__.push_back(gamma(j_1__)); + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + if (!include_gqs__ && !include_tparams__) return; + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 98; + validate_non_negative_index("y_sim", "N", N); + Eigen::Matrix y_sim(N); + stan::math::initialize(y_sim, DUMMY_VAR__); + stan::math::fill(y_sim, DUMMY_VAR__); + current_statement_begin__ = 99; + validate_non_negative_index("y_pred", "N_pred", N_pred); + Eigen::Matrix y_pred(N_pred); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 100; + for (int i = 1; i <= N; ++i) { + current_statement_begin__ = 101; + stan::model::assign(y_sim, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + ((normal_rng((multiply(stan::model::rvalue(X_std, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_std"), beta) + multiply(stan::model::rvalue(M_std, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "M_std"), gamma)), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable y_sim"); + } + current_statement_begin__ = 103; + for (int j = 1; j <= N_pred; ++j) { + current_statement_begin__ = 104; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + ((normal_rng((multiply(stan::model::rvalue(X_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_pred_std"), beta) + multiply(stan::model::rvalue(M_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "M_pred_std"), gamma)), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable y_pred"); + } + // validate, write generated quantities + current_statement_begin__ = 98; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) 
{ + vars__.push_back(y_sim(j_1__)); + } + current_statement_begin__ = 99; + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + vars__.push_back(y_pred(j_1__)); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_model3"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t gamma_j_1_max__ = M_K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_j_1_max__ = (K - 1); + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t gamma_j_1_max__ = M_K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } +}; // model +} // namespace +typedef model_model3_namespace::model_model3 stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_model4.cc b/src/stanExports_model4.cc new file mode 100644 index 0000000..51a2772 --- /dev/null +++ b/src/stanExports_model4.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_model4.h" + +RCPP_MODULE(stan_fit4model4_mod) { + + + class_ >("model_model4") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_model4.h b/src/stanExports_model4.h new file mode 100644 index 0000000..25d32d9 --- /dev/null +++ b/src/stanExports_model4.h @@ -0,0 +1,855 @@ +// Generated by rstantools. Do not edit by hand. 
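The generated header that closes above, stanExports_model3.h, is the Stan 2.21 C++ translation of the single-treated-unit specification with covariates, and the statistical content is easier to read back as a Stan program. The sketch below is a hand reconstruction inferred from the generated constructor, log_prob and write_array bodies; it is not copied from the package's Stan source, and the variable names, bounds and the synthetic-control reading in the comments are assumptions based on that C++ (a time grid and sumN count also appear in the generated transformed data but are omitted here because they do not enter the model3 likelihood).

// Sketch only: reconstructed by hand from the generated C++ above, not the shipped .stan file.
data {
  int<lower=1> N;               // pre-treatment time points
  vector[N] y;                  // outcome for the treated unit
  int<lower=0> K;               // number of donor (control) units
  matrix[N, K] X;               // donor outcomes in the pre-period
  int<lower=0> M_K;             // number of covariates
  matrix[N, M_K] M;             // covariates in the pre-period
  int<lower=1> N_pred;          // post-treatment time points
  matrix[N_pred, K] X_pred;     // donor outcomes in the post-period
  matrix[N_pred, M_K] M_pred;   // covariates in the post-period
}
transformed data {
  real mean_y = mean(y);
  real sd_y = sd(y);
  vector[N] y_std = (y - mean_y) / sd_y;
  matrix[N, K] X_std;
  matrix[N_pred, K] X_pred_std;
  matrix[N, M_K] M_std;
  matrix[N_pred, M_K] M_pred_std;
  for (k in 1:K) {              // column-wise standardization, as in the generated constructor
    X_std[, k] = (X[, k] - mean(X[, k])) / sd(X[, k]);
    X_pred_std[, k] = (X_pred[, k] - mean(X[, k])) / sd(X[, k]);
  }
  for (j in 1:M_K) {
    M_std[, j] = (M[, j] - mean(M[, j])) / sd(M[, j]);
    M_pred_std[, j] = (M_pred[, j] - mean(M[, j])) / sd(M[, j]);
  }
}
parameters {
  real<lower=0> sigma;          // residual scale on the standardized outcome
  simplex[K] beta;              // synthetic-control weights over the K donors
  vector[M_K] gamma;            // covariate coefficients
}
model {
  sigma ~ normal(0, 1);         // half-normal via the lower bound
  gamma ~ normal(0, 1);
  y_std ~ normal(X_std * beta + M_std * gamma, sigma);
}
generated quantities {
  vector[N] y_sim;              // in-sample replications, back on the original scale
  vector[N_pred] y_pred;        // counterfactual path for the post-period
  for (i in 1:N)
    y_sim[i] = normal_rng(X_std[i] * beta + M_std[i] * gamma, sigma) * sd_y + mean_y;
  for (j in 1:N_pred)
    y_pred[j] = normal_rng(X_pred_std[j] * beta + M_pred_std[j] * gamma, sigma) * sd_y + mean_y;
}

Note that beta carries no explicit prior statement, which matches the generated log_prob (it only increments the target for sigma, gamma and the likelihood); a simplex with no prior is uniform over the simplex.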
+ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_model4_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_model4"); + reader.add_event(111, 109, "end", "model_model4"); + return reader; +} +#include +class model_model4 + : public stan::model::model_base_crtp { +private: + int N; + vector_d y; + int K; + matrix_d X; + int M_K; + matrix_d M; + int N_pred; + matrix_d X_pred; + matrix_d M_pred; + matrix_d X_std; + matrix_d X_pred_std; + vector_d mean_X; + vector_d sd_X; + matrix_d M_std; + matrix_d M_pred_std; + vector_d mean_M; + vector_d sd_M; + double mean_y; + double sd_y; + std::vector time; + vector_d y_std; + int sumN; +public: + model_model4(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_model4(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_model4_namespace::model_model4"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 22; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 23; + validate_non_negative_index("y", "N", N); + context__.validate_dims("data initialization", "y", "vector_d", context__.to_vec(N)); + y = Eigen::Matrix(N); + vals_r__ = context__.vals_r("y"); + pos__ = 0; + size_t y_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_j_1_max__; ++j_1__) { + y(j_1__) = vals_r__[pos__++]; + } + current_statement_begin__ = 24; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 0); + current_statement_begin__ = 25; + validate_non_negative_index("X", "N", N); + validate_non_negative_index("X", "K", K); + context__.validate_dims("data initialization", "X", "matrix_d", context__.to_vec(N,K)); + X = Eigen::Matrix(N, K); + vals_r__ = context__.vals_r("X"); + pos__ = 0; + size_t X_j_2_max__ = K; + size_t X_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < X_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_j_1_max__; ++j_1__) { + X(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 26; + 
context__.validate_dims("data initialization", "M_K", "int", context__.to_vec()); + M_K = int(0); + vals_i__ = context__.vals_i("M_K"); + pos__ = 0; + M_K = vals_i__[pos__++]; + check_greater_or_equal(function__, "M_K", M_K, 0); + current_statement_begin__ = 27; + validate_non_negative_index("M", "N", N); + validate_non_negative_index("M", "M_K", M_K); + context__.validate_dims("data initialization", "M", "matrix_d", context__.to_vec(N,M_K)); + M = Eigen::Matrix(N, M_K); + vals_r__ = context__.vals_r("M"); + pos__ = 0; + size_t M_j_2_max__ = M_K; + size_t M_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < M_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < M_j_1_max__; ++j_1__) { + M(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 28; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = context__.vals_i("N_pred"); + pos__ = 0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 29; + validate_non_negative_index("X_pred", "N_pred", N_pred); + validate_non_negative_index("X_pred", "K", K); + context__.validate_dims("data initialization", "X_pred", "matrix_d", context__.to_vec(N_pred,K)); + X_pred = Eigen::Matrix(N_pred, K); + vals_r__ = context__.vals_r("X_pred"); + pos__ = 0; + size_t X_pred_j_2_max__ = K; + size_t X_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < X_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_pred_j_1_max__; ++j_1__) { + X_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 30; + validate_non_negative_index("M_pred", "N_pred", N_pred); + validate_non_negative_index("M_pred", "M_K", M_K); + context__.validate_dims("data initialization", "M_pred", "matrix_d", context__.to_vec(N_pred,M_K)); + M_pred = Eigen::Matrix(N_pred, M_K); + vals_r__ = context__.vals_r("M_pred"); + pos__ = 0; + size_t M_pred_j_2_max__ = M_K; + size_t M_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < M_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < M_pred_j_1_max__; ++j_1__) { + M_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + // initialize transformed data variables + current_statement_begin__ = 34; + validate_non_negative_index("X_std", "N", N); + validate_non_negative_index("X_std", "K", K); + X_std = Eigen::Matrix(N, K); + stan::math::fill(X_std, DUMMY_VAR__); + current_statement_begin__ = 35; + validate_non_negative_index("X_pred_std", "N_pred", N_pred); + validate_non_negative_index("X_pred_std", "K", K); + X_pred_std = Eigen::Matrix(N_pred, K); + stan::math::fill(X_pred_std, DUMMY_VAR__); + current_statement_begin__ = 36; + validate_non_negative_index("mean_X", "K", K); + mean_X = Eigen::Matrix(K); + stan::math::fill(mean_X, DUMMY_VAR__); + current_statement_begin__ = 37; + validate_non_negative_index("sd_X", "K", K); + sd_X = Eigen::Matrix(K); + stan::math::fill(sd_X, DUMMY_VAR__); + current_statement_begin__ = 38; + validate_non_negative_index("M_std", "N", N); + validate_non_negative_index("M_std", "M_K", M_K); + M_std = Eigen::Matrix(N, M_K); + stan::math::fill(M_std, DUMMY_VAR__); + current_statement_begin__ = 39; + validate_non_negative_index("M_pred_std", "N_pred", N_pred); + validate_non_negative_index("M_pred_std", "M_K", M_K); + M_pred_std = Eigen::Matrix(N_pred, M_K); + stan::math::fill(M_pred_std, DUMMY_VAR__); + current_statement_begin__ = 40; + validate_non_negative_index("mean_M", "M_K", M_K); + mean_M = Eigen::Matrix(M_K); + 
stan::math::fill(mean_M, DUMMY_VAR__); + current_statement_begin__ = 41; + validate_non_negative_index("sd_M", "M_K", M_K); + sd_M = Eigen::Matrix(M_K); + stan::math::fill(sd_M, DUMMY_VAR__); + current_statement_begin__ = 42; + mean_y = double(0); + stan::math::fill(mean_y, DUMMY_VAR__); + stan::math::assign(mean_y,mean(y)); + current_statement_begin__ = 43; + sd_y = double(0); + stan::math::fill(sd_y, DUMMY_VAR__); + stan::math::assign(sd_y,sd(y)); + current_statement_begin__ = 44; + validate_non_negative_index("time", "(N + N_pred)", (N + N_pred)); + time = std::vector((N + N_pred), double(0)); + stan::math::fill(time, DUMMY_VAR__); + current_statement_begin__ = 45; + validate_non_negative_index("y_std", "N", N); + y_std = Eigen::Matrix(N); + stan::math::fill(y_std, DUMMY_VAR__); + stan::math::assign(y_std,divide(subtract(y, mean_y), sd_y)); + current_statement_begin__ = 46; + sumN = int(0); + stan::math::fill(sumN, std::numeric_limits::min()); + stan::math::assign(sumN,(N + N_pred)); + // execute transformed data statements + current_statement_begin__ = 48; + for (int t = 1; t <= sumN; ++t) { + current_statement_begin__ = 49; + stan::model::assign(time, + stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), + t, + "assigning variable time"); + } + current_statement_begin__ = 52; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 53; + stan::model::assign(mean_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + mean(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable mean_X"); + current_statement_begin__ = 54; + stan::model::assign(sd_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + sd(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable sd_X"); + current_statement_begin__ = 55; + stan::model::assign(X_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_std"); + current_statement_begin__ = 56; + stan::model::assign(X_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X_pred"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_pred_std"); + } + current_statement_begin__ = 59; + for (int j = 1; j <= M_K; ++j) { + current_statement_begin__ = 60; + stan::model::assign(mean_M, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + mean(stan::model::rvalue(M, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M")), + "assigning variable mean_M"); + current_statement_begin__ = 61; + stan::model::assign(sd_M, + 
stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + sd(stan::model::rvalue(M, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M")), + "assigning variable sd_M"); + current_statement_begin__ = 62; + stan::model::assign(M_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(M, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M"), get_base1(mean_M, j, "mean_M", 1)), get_base1(sd_M, j, "sd_M", 1)), + "assigning variable M_std"); + current_statement_begin__ = 63; + stan::model::assign(M_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(M_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M_pred"), get_base1(mean_M, j, "mean_M", 1)), get_base1(sd_M, j, "sd_M", 1)), + "assigning variable M_pred_std"); + } + // validate transformed data + current_statement_begin__ = 46; + check_greater_or_equal(function__, "sumN", sumN, 1); + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 69; + num_params_r__ += 1; + current_statement_begin__ = 70; + validate_non_negative_index("beta", "K", K); + num_params_r__ += (K - 1); + current_statement_begin__ = 71; + num_params_r__ += 1; + current_statement_begin__ = 72; + num_params_r__ += 1; + current_statement_begin__ = 73; + validate_non_negative_index("eta", "sumN", sumN); + num_params_r__ += sumN; + current_statement_begin__ = 74; + validate_non_negative_index("gamma", "M_K", M_K); + num_params_r__ += M_K; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_model4() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 69; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec()); + double sigma(0); + sigma = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, sigma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 70; + if (!(context__.contains_r("beta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta"); + pos__ = 0U; + validate_non_negative_index("beta", 
"K", K); + context__.validate_dims("parameter initialization", "beta", "vector_d", context__.to_vec(K)); + Eigen::Matrix beta(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + beta(j_1__) = vals_r__[pos__++]; + } + try { + writer__.simplex_unconstrain(beta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 71; + if (!(context__.contains_r("rho"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable rho missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("rho"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "rho", "double", context__.to_vec()); + double rho(0); + rho = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, rho); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable rho: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 72; + if (!(context__.contains_r("alpha"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable alpha missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("alpha"); + pos__ = 0U; + context__.validate_dims("parameter initialization", "alpha", "double", context__.to_vec()); + double alpha(0); + alpha = vals_r__[pos__++]; + try { + writer__.scalar_lb_unconstrain(0, alpha); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable alpha: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 73; + if (!(context__.contains_r("eta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable eta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("eta"); + pos__ = 0U; + validate_non_negative_index("eta", "sumN", sumN); + context__.validate_dims("parameter initialization", "eta", "vector_d", context__.to_vec(sumN)); + Eigen::Matrix eta(sumN); + size_t eta_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + eta(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(eta); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable eta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + current_statement_begin__ = 74; + if (!(context__.contains_r("gamma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable gamma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("gamma"); + pos__ = 0U; + validate_non_negative_index("gamma", "M_K", M_K); + context__.validate_dims("parameter initialization", "gamma", "vector_d", context__.to_vec(M_K)); + Eigen::Matrix gamma(M_K); + size_t gamma_j_1_max__ = M_K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + gamma(j_1__) = vals_r__[pos__++]; + } + try { + writer__.vector_unconstrain(gamma); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable gamma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, 
+ Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 69; + local_scalar_t__ sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.scalar_lb_constrain(0, lp__); + else + sigma = in__.scalar_lb_constrain(0); + current_statement_begin__ = 70; + Eigen::Matrix beta; + (void) beta; // dummy to suppress unused var warning + if (jacobian__) + beta = in__.simplex_constrain(K, lp__); + else + beta = in__.simplex_constrain(K); + current_statement_begin__ = 71; + local_scalar_t__ rho; + (void) rho; // dummy to suppress unused var warning + if (jacobian__) + rho = in__.scalar_lb_constrain(0, lp__); + else + rho = in__.scalar_lb_constrain(0); + current_statement_begin__ = 72; + local_scalar_t__ alpha; + (void) alpha; // dummy to suppress unused var warning + if (jacobian__) + alpha = in__.scalar_lb_constrain(0, lp__); + else + alpha = in__.scalar_lb_constrain(0); + current_statement_begin__ = 73; + Eigen::Matrix eta; + (void) eta; // dummy to suppress unused var warning + if (jacobian__) + eta = in__.vector_constrain(sumN, lp__); + else + eta = in__.vector_constrain(sumN); + current_statement_begin__ = 74; + Eigen::Matrix gamma; + (void) gamma; // dummy to suppress unused var warning + if (jacobian__) + gamma = in__.vector_constrain(M_K, lp__); + else + gamma = in__.vector_constrain(M_K); + // transformed parameters + current_statement_begin__ = 78; + validate_non_negative_index("f", "sumN", sumN); + Eigen::Matrix f(sumN); + stan::math::initialize(f, DUMMY_VAR__); + stan::math::fill(f, DUMMY_VAR__); + // transformed parameters block statements + { + current_statement_begin__ = 80; + validate_non_negative_index("K_matrix", "sumN", sumN); + validate_non_negative_index("K_matrix", "sumN", sumN); + Eigen::Matrix K_matrix(sumN, sumN); + stan::math::initialize(K_matrix, DUMMY_VAR__); + stan::math::fill(K_matrix, DUMMY_VAR__); + stan::math::assign(K_matrix,add(cov_exp_quad(time, alpha, rho), diag_matrix(rep_vector(1e-9, sumN)))); + current_statement_begin__ = 82; + validate_non_negative_index("L_K", "sumN", sumN); + validate_non_negative_index("L_K", "sumN", sumN); + Eigen::Matrix L_K(sumN, sumN); + stan::math::initialize(L_K, DUMMY_VAR__); + stan::math::fill(L_K, DUMMY_VAR__); + stan::math::assign(L_K,cholesky_decompose(K_matrix)); + current_statement_begin__ = 83; + stan::math::assign(f, multiply(L_K, eta)); + } + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 78; + size_t f_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(f(j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: f" << "(" << j_1__ << ")"; + 
stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable f: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + // model body + current_statement_begin__ = 89; + lp_accum__.add(normal_log(rho, 0, 3)); + current_statement_begin__ = 90; + lp_accum__.add(normal_log(alpha, 0, 1)); + current_statement_begin__ = 91; + lp_accum__.add(normal_log(sigma, 0, 1)); + current_statement_begin__ = 92; + lp_accum__.add(normal_log(eta, 0, 1)); + current_statement_begin__ = 93; + lp_accum__.add(normal_log(gamma, 0, 1)); + current_statement_begin__ = 94; + lp_accum__.add(normal_log(y_std, add(add(multiply(X_std, beta), multiply(M_std, gamma)), stan::model::rvalue(f, stan::model::cons_list(stan::model::index_min_max(1, N), stan::model::nil_index_list()), "f")), sigma)); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("sigma"); + names__.push_back("beta"); + names__.push_back("rho"); + names__.push_back("alpha"); + names__.push_back("eta"); + names__.push_back("gamma"); + names__.push_back("f"); + names__.push_back("y_sim"); + names__.push_back("y_pred"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(sumN); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(M_K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(sumN); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_model4_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + double sigma = in__.scalar_lb_constrain(0); + vars__.push_back(sigma); + Eigen::Matrix beta = in__.simplex_constrain(K); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + vars__.push_back(beta(j_1__)); + } + double rho = in__.scalar_lb_constrain(0); + vars__.push_back(rho); + double alpha = in__.scalar_lb_constrain(0); + vars__.push_back(alpha); + Eigen::Matrix eta = in__.vector_constrain(sumN); + size_t eta_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + vars__.push_back(eta(j_1__)); 
+ } + Eigen::Matrix gamma = in__.vector_constrain(M_K); + size_t gamma_j_1_max__ = M_K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + vars__.push_back(gamma(j_1__)); + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + // declare and define transformed parameters + current_statement_begin__ = 78; + validate_non_negative_index("f", "sumN", sumN); + Eigen::Matrix f(sumN); + stan::math::initialize(f, DUMMY_VAR__); + stan::math::fill(f, DUMMY_VAR__); + // do transformed parameters statements + { + current_statement_begin__ = 80; + validate_non_negative_index("K_matrix", "sumN", sumN); + validate_non_negative_index("K_matrix", "sumN", sumN); + Eigen::Matrix K_matrix(sumN, sumN); + stan::math::initialize(K_matrix, DUMMY_VAR__); + stan::math::fill(K_matrix, DUMMY_VAR__); + stan::math::assign(K_matrix,add(cov_exp_quad(time, alpha, rho), diag_matrix(rep_vector(1e-9, sumN)))); + current_statement_begin__ = 82; + validate_non_negative_index("L_K", "sumN", sumN); + validate_non_negative_index("L_K", "sumN", sumN); + Eigen::Matrix L_K(sumN, sumN); + stan::math::initialize(L_K, DUMMY_VAR__); + stan::math::fill(L_K, DUMMY_VAR__); + stan::math::assign(L_K,cholesky_decompose(K_matrix)); + current_statement_begin__ = 83; + stan::math::assign(f, multiply(L_K, eta)); + } + if (!include_gqs__ && !include_tparams__) return; + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + // write transformed parameters + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + vars__.push_back(f(j_1__)); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 99; + validate_non_negative_index("y_sim", "N", N); + Eigen::Matrix y_sim(N); + stan::math::initialize(y_sim, DUMMY_VAR__); + stan::math::fill(y_sim, DUMMY_VAR__); + current_statement_begin__ = 100; + validate_non_negative_index("y_pred", "N_pred", N_pred); + Eigen::Matrix y_pred(N_pred); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 101; + for (int i = 1; i <= N; ++i) { + current_statement_begin__ = 102; + stan::model::assign(y_sim, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + ((normal_rng(((multiply(stan::model::rvalue(X_std, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_std"), beta) + multiply(stan::model::rvalue(M_std, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "M_std"), gamma)) + get_base1(f, i, "f", 1)), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable y_sim"); + } + current_statement_begin__ = 105; + for (int j = 1; j <= N_pred; ++j) { + current_statement_begin__ = 106; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), + ((normal_rng(((multiply(stan::model::rvalue(X_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), 
stan::model::nil_index_list())), "X_pred_std"), beta) + multiply(stan::model::rvalue(M_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "M_pred_std"), gamma)) + get_base1(f, (N + j), "f", 1)), sigma, base_rng__) * sd_y) + mean_y), + "assigning variable y_pred"); + } + // validate, write generated quantities + current_statement_begin__ = 99; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + vars__.push_back(y_sim(j_1__)); + } + current_statement_begin__ = 100; + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + vars__.push_back(y_pred(j_1__)); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_model4"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_j_1_max__ = K; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "rho"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha"; + param_names__.push_back(param_name_stream__.str()); + size_t eta_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "eta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t gamma_j_1_max__ = M_K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "f" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma"; + param_names__.push_back(param_name_stream__.str()); + size_t beta_j_1_max__ = (K - 1); + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + param_name_stream__.str(std::string()); + param_name_stream__ << "rho"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha"; + param_names__.push_back(param_name_stream__.str()); + size_t eta_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "eta" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t gamma_j_1_max__ = M_K; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "f" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t y_pred_j_1_max__ = N_pred; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } +}; // model +} // namespace +typedef model_model4_namespace::model_model4 stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_model5.cc b/src/stanExports_model5.cc new file mode 100644 index 0000000..ec1bff8 --- /dev/null +++ b/src/stanExports_model5.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. 
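stanExports_model4.h, which ends just above, encodes the same single-treated-unit, with-covariates specification extended with a Gaussian-process trend over time: an exponentiated-quadratic kernel on a shared pre/post time grid, a 1e-9 jitter on the diagonal, and the usual Cholesky (non-centred) parameterisation. As before, the following is a hand-reconstructed sketch read off the generated constructor, log_prob and write_array bodies, not the shipped Stan source; names and comments are assumptions.

// Sketch only: reconstructed from the generated C++ of stanExports_model4.h.
data {
  int<lower=1> N;
  vector[N] y;
  int<lower=0> K;
  matrix[N, K] X;
  int<lower=0> M_K;
  matrix[N, M_K] M;
  int<lower=1> N_pred;
  matrix[N_pred, K] X_pred;
  matrix[N_pred, M_K] M_pred;
}
transformed data {
  // standardization as in the model3 sketch, plus a time grid spanning
  // both the pre- and post-period for the GP
  real mean_y = mean(y);
  real sd_y = sd(y);
  vector[N] y_std = (y - mean_y) / sd_y;
  int<lower=1> sumN = N + N_pred;
  real time[sumN];
  matrix[N, K] X_std;
  matrix[N_pred, K] X_pred_std;
  matrix[N, M_K] M_std;
  matrix[N_pred, M_K] M_pred_std;
  for (t in 1:sumN) time[t] = t;
  for (k in 1:K) {
    X_std[, k] = (X[, k] - mean(X[, k])) / sd(X[, k]);
    X_pred_std[, k] = (X_pred[, k] - mean(X[, k])) / sd(X[, k]);
  }
  for (j in 1:M_K) {
    M_std[, j] = (M[, j] - mean(M[, j])) / sd(M[, j]);
    M_pred_std[, j] = (M_pred[, j] - mean(M[, j])) / sd(M[, j]);
  }
}
parameters {
  real<lower=0> sigma;
  simplex[K] beta;          // donor weights
  real<lower=0> rho;        // GP length-scale
  real<lower=0> alpha;      // GP marginal scale
  vector[sumN] eta;         // non-centred GP innovations
  vector[M_K] gamma;        // covariate coefficients
}
transformed parameters {
  vector[sumN] f;           // latent GP path over pre- and post-period
  {
    matrix[sumN, sumN] K_matrix = cov_exp_quad(time, alpha, rho)
                                  + diag_matrix(rep_vector(1e-9, sumN));
    matrix[sumN, sumN] L_K = cholesky_decompose(K_matrix);
    f = L_K * eta;
  }
}
model {
  rho ~ normal(0, 3);
  alpha ~ normal(0, 1);
  sigma ~ normal(0, 1);
  eta ~ normal(0, 1);
  gamma ~ normal(0, 1);
  y_std ~ normal(X_std * beta + M_std * gamma + f[1:N], sigma);
}
generated quantities {
  vector[N] y_sim;
  vector[N_pred] y_pred;
  for (i in 1:N)
    y_sim[i] = normal_rng(X_std[i] * beta + M_std[i] * gamma + f[i], sigma) * sd_y + mean_y;
  for (j in 1:N_pred)
    y_pred[j] = normal_rng(X_pred_std[j] * beta + M_pred_std[j] * gamma + f[N + j], sigma) * sd_y + mean_y;
}

Only f[1:N] enters the likelihood; the remaining N_pred elements of the latent path are used in generated quantities to draw the post-period counterfactual y_pred, which is how the GP variant extrapolates the trend beyond the fitting window.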
+ +#include +using namespace Rcpp ; +#include "stanExports_model5.h" + +RCPP_MODULE(stan_fit4model5_mod) { + + + class_ >("model_model5") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_model5.h b/src/stanExports_model5.h new file mode 100644 index 0000000..9f84d04 --- /dev/null +++ b/src/stanExports_model5.h @@ -0,0 +1,606 @@ +// Generated by rstantools. Do not edit by hand. + +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_model5_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_model5"); + reader.add_event(81, 79, "end", "model_model5"); + return reader; +} +#include +class model_model5 + : public stan::model::model_base_crtp { +private: + int N; + int I; + std::vector y; + int K; + matrix_d X; + int N_pred; + matrix_d X_pred; + matrix_d X_std; + matrix_d X_pred_std; + std::vector y_std; + std::vector mean_y; + std::vector sd_y; +public: + model_model5(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_model5(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_model5_namespace::model_model5"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 20; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ 
= context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 21; + context__.validate_dims("data initialization", "I", "int", context__.to_vec()); + I = int(0); + vals_i__ = context__.vals_i("I"); + pos__ = 0; + I = vals_i__[pos__++]; + check_greater_or_equal(function__, "I", I, 1); + current_statement_begin__ = 22; + validate_non_negative_index("y", "N", N); + validate_non_negative_index("y", "I", I); + context__.validate_dims("data initialization", "y", "vector_d", context__.to_vec(I,N)); + y = std::vector >(I, Eigen::Matrix(N)); + vals_r__ = context__.vals_r("y"); + pos__ = 0; + size_t y_j_1_max__ = N; + size_t y_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_k_0_max__; ++k_0__) { + y[k_0__](j_1__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 23; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 0); + current_statement_begin__ = 24; + validate_non_negative_index("X", "N", N); + validate_non_negative_index("X", "K", K); + context__.validate_dims("data initialization", "X", "matrix_d", context__.to_vec(N,K)); + X = Eigen::Matrix(N, K); + vals_r__ = context__.vals_r("X"); + pos__ = 0; + size_t X_j_2_max__ = K; + size_t X_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < X_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_j_1_max__; ++j_1__) { + X(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 25; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = context__.vals_i("N_pred"); + pos__ = 0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 26; + validate_non_negative_index("X_pred", "N_pred", N_pred); + validate_non_negative_index("X_pred", "K", K); + context__.validate_dims("data initialization", "X_pred", "matrix_d", context__.to_vec(N_pred,K)); + X_pred = Eigen::Matrix(N_pred, K); + vals_r__ = context__.vals_r("X_pred"); + pos__ = 0; + size_t X_pred_j_2_max__ = K; + size_t X_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < X_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_pred_j_1_max__; ++j_1__) { + X_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + // initialize transformed data variables + current_statement_begin__ = 31; + validate_non_negative_index("X_std", "N", N); + validate_non_negative_index("X_std", "K", K); + X_std = Eigen::Matrix(N, K); + stan::math::fill(X_std, DUMMY_VAR__); + current_statement_begin__ = 32; + validate_non_negative_index("X_pred_std", "N_pred", N_pred); + validate_non_negative_index("X_pred_std", "K", K); + X_pred_std = Eigen::Matrix(N_pred, K); + stan::math::fill(X_pred_std, DUMMY_VAR__); + current_statement_begin__ = 33; + validate_non_negative_index("y_std", "N", N); + validate_non_negative_index("y_std", "I", I); + y_std = std::vector >(I, Eigen::Matrix(N)); + stan::math::fill(y_std, DUMMY_VAR__); + current_statement_begin__ = 34; + validate_non_negative_index("mean_y", "I", I); + mean_y = std::vector(I, double(0)); + stan::math::fill(mean_y, DUMMY_VAR__); + current_statement_begin__ = 35; + validate_non_negative_index("sd_y", "I", I); + sd_y = std::vector(I, double(0)); + stan::math::fill(sd_y, DUMMY_VAR__); + // execute transformed data statements + { + 
current_statement_begin__ = 37; + validate_non_negative_index("mean_X", "K", K); + Eigen::Matrix mean_X(K); + stan::math::initialize(mean_X, DUMMY_VAR__); + stan::math::fill(mean_X, DUMMY_VAR__); + current_statement_begin__ = 38; + validate_non_negative_index("sd_X", "K", K); + Eigen::Matrix sd_X(K); + stan::math::initialize(sd_X, DUMMY_VAR__); + stan::math::fill(sd_X, DUMMY_VAR__); + current_statement_begin__ = 39; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 40; + stan::model::assign(mean_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + mean(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable mean_X"); + current_statement_begin__ = 41; + stan::model::assign(sd_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + sd(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable sd_X"); + current_statement_begin__ = 42; + stan::model::assign(X_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_std"); + current_statement_begin__ = 43; + stan::model::assign(X_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X_pred"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_pred_std"); + } + current_statement_begin__ = 45; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 46; + stan::model::assign(mean_y, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + mean(get_base1(y, i, "y", 1)), + "assigning variable mean_y"); + current_statement_begin__ = 47; + stan::model::assign(sd_y, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + sd(get_base1(y, i, "y", 1)), + "assigning variable sd_y"); + current_statement_begin__ = 48; + stan::model::assign(y_std, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + divide(subtract(get_base1(y, i, "y", 1), get_base1(mean_y, i, "mean_y", 1)), get_base1(sd_y, i, "sd_y", 1)), + "assigning variable y_std"); + } + } + // validate transformed data + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 54; + validate_non_negative_index("sigma", "I", I); + num_params_r__ += (1 * I); + current_statement_begin__ = 55; + validate_non_negative_index("beta", "K", K); + validate_non_negative_index("beta", "I", I); + num_params_r__ += ((K - 1) * I); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } 
+ } + ~model_model5() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 54; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + validate_non_negative_index("sigma", "I", I); + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec(I)); + std::vector sigma(I, double(0)); + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + sigma[k_0__] = vals_r__[pos__++]; + } + size_t sigma_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < sigma_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, sigma[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 55; + if (!(context__.contains_r("beta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta"); + pos__ = 0U; + validate_non_negative_index("beta", "K", K); + validate_non_negative_index("beta", "I", I); + context__.validate_dims("parameter initialization", "beta", "vector_d", context__.to_vec(I,K)); + std::vector > beta(I, Eigen::Matrix(K)); + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + beta[k_0__](j_1__) = vals_r__[pos__++]; + } + } + size_t beta_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < beta_i_0_max__; ++i_0__) { + try { + writer__.simplex_unconstrain(beta[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 54; + std::vector sigma; + size_t sigma_d_0_max__ = I; + sigma.reserve(sigma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { + if (jacobian__) + sigma.push_back(in__.scalar_lb_constrain(0, lp__)); + else + sigma.push_back(in__.scalar_lb_constrain(0)); + } + 
current_statement_begin__ = 55; + std::vector > beta; + size_t beta_d_0_max__ = I; + beta.reserve(beta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < beta_d_0_max__; ++d_0__) { + if (jacobian__) + beta.push_back(in__.simplex_constrain(K, lp__)); + else + beta.push_back(in__.simplex_constrain(K)); + } + // model body + current_statement_begin__ = 59; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 60; + lp_accum__.add(normal_log(get_base1(sigma, i, "sigma", 1), 0, 1)); + current_statement_begin__ = 61; + lp_accum__.add(normal_log(get_base1(y_std, i, "y_std", 1), multiply(X_std, get_base1(beta, i, "beta", 1)), get_base1(sigma, i, "sigma", 1))); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("sigma"); + names__.push_back("beta"); + names__.push_back("y_sim"); + names__.push_back("y_pred"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(I); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_model5_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + std::vector sigma; + size_t sigma_d_0_max__ = I; + sigma.reserve(sigma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { + sigma.push_back(in__.scalar_lb_constrain(0)); + } + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + vars__.push_back(sigma[k_0__]); + } + std::vector > beta; + size_t beta_d_0_max__ = I; + beta.reserve(beta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < beta_d_0_max__; ++d_0__) { + beta.push_back(in__.simplex_constrain(K)); + } + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + vars__.push_back(beta[k_0__](j_1__)); + } + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + if (!include_gqs__ && !include_tparams__) 
return; + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 67; + validate_non_negative_index("y_sim", "N", N); + validate_non_negative_index("y_sim", "I", I); + std::vector > y_sim(I, Eigen::Matrix(N)); + stan::math::initialize(y_sim, DUMMY_VAR__); + stan::math::fill(y_sim, DUMMY_VAR__); + current_statement_begin__ = 68; + validate_non_negative_index("y_pred", "N_pred", N_pred); + validate_non_negative_index("y_pred", "I", I); + std::vector > y_pred(I, Eigen::Matrix(N_pred)); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 69; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 70; + for (int n = 1; n <= N; ++n) { + current_statement_begin__ = 71; + stan::model::assign(y_sim, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list())), + ((normal_rng(multiply(stan::model::rvalue(X_std, stan::model::cons_list(stan::model::index_uni(n), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_std"), get_base1(beta, i, "beta", 1)), get_base1(sigma, i, "sigma", 1), base_rng__) * get_base1(sd_y, i, "sd_y", 1)) + get_base1(mean_y, i, "mean_y", 1)), + "assigning variable y_sim"); + } + current_statement_begin__ = 74; + for (int j = 1; j <= N_pred; ++j) { + current_statement_begin__ = 75; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + ((normal_rng(multiply(stan::model::rvalue(X_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_pred_std"), get_base1(beta, i, "beta", 1)), get_base1(sigma, i, "sigma", 1), base_rng__) * get_base1(sd_y, i, "sd_y", 1)) + get_base1(mean_y, i, "mean_y", 1)), + "assigning variable y_pred"); + } + } + // validate, write generated quantities + current_statement_begin__ = 67; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + vars__.push_back(y_sim[k_0__](j_1__)); + } + } + current_statement_begin__ = 68; + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + vars__.push_back(y_pred[k_0__](j_1__)); + } + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_model5"; + } + void constrained_param_names(std::vector& 
param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t beta_j_1_max__ = (K - 1); + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ + 1 << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } +}; // model +} // namespace +typedef model_model5_namespace::model_model5 stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_model6.cc b/src/stanExports_model6.cc new file mode 100644 index 0000000..2ab7b0e --- /dev/null +++ b/src/stanExports_model6.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_model6.h" + +RCPP_MODULE(stan_fit4model6_mod) { + + + class_ >("model_model6") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_model6.h b/src/stanExports_model6.h new file mode 100644 index 0000000..c7d2b52 --- /dev/null +++ b/src/stanExports_model6.h @@ -0,0 +1,777 @@ +// Generated by rstantools. Do not edit by hand. 
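The short stanExports_model6.cc file above is representative of every *.cc file in src/: it only registers the compiled model class from the matching header as an Rcpp module (here stan_fit4model6_mod), which rstan drives when the package samples from the model. The sketch below shows how a fit could be drawn from that compiled object directly; it is illustrative only, since stanmodels$model6 is an internal object created by rstantools rather than an exported interface, and the field names in the data list are read off the data-initialization code in stanExports_model6.h, with made-up dimensions:

```r
# Hedged sketch; in normal use the package's own R code assembles this list internally.
# Shapes follow the data block recovered from stanExports_model6.h:
#   y: I x N, X: N x K, M: I x N x M_K, X_pred: N_pred x K, M_pred: I x N_pred x M_K
stan_data <- list(
  N = 10, I = 2, K = 4, M_K = 3, N_pred = 5,
  y      = matrix(rnorm(20), 2, 10),
  X      = matrix(rnorm(40), 10, 4),
  M      = array(rnorm(60), dim = c(2, 10, 3)),
  X_pred = matrix(rnorm(20), 5, 4),
  M_pred = array(rnorm(30), dim = c(2, 5, 3))
)

fit <- rstan::sampling(
  stanmodels$model6,  # internal model object; not part of the public API
  data    = stan_data,
  chains  = 4,
  iter    = 2000,
  refresh = 0
)
print(fit, pars = c("sigma", "beta", "gamma"))
```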
+ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_model6_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_model6"); + reader.add_event(101, 99, "end", "model_model6"); + return reader; +} +#include +class model_model6 + : public stan::model::model_base_crtp { +private: + int N; + int I; + std::vector y; + int K; + matrix_d X; + int M_K; + std::vector M; + int N_pred; + matrix_d X_pred; + std::vector M_pred; + matrix_d X_std; + matrix_d X_pred_std; + std::vector y_std; + std::vector mean_y; + std::vector sd_y; + std::vector M_std; + std::vector M_pred_std; +public: + model_model6(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_model6(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_model6_namespace::model_model6"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 21; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 22; + context__.validate_dims("data initialization", "I", "int", context__.to_vec()); + I = int(0); + vals_i__ = context__.vals_i("I"); + pos__ = 0; + I = vals_i__[pos__++]; + check_greater_or_equal(function__, "I", I, 1); + current_statement_begin__ = 23; + validate_non_negative_index("y", "N", N); + validate_non_negative_index("y", "I", I); + context__.validate_dims("data initialization", "y", "vector_d", context__.to_vec(I,N)); + y = std::vector >(I, Eigen::Matrix(N)); + vals_r__ = context__.vals_r("y"); + pos__ = 0; + size_t y_j_1_max__ = N; + size_t y_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_k_0_max__; ++k_0__) { + y[k_0__](j_1__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 24; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 0); + current_statement_begin__ = 25; + validate_non_negative_index("X", "N", N); + validate_non_negative_index("X", "K", K); + context__.validate_dims("data initialization", "X", "matrix_d", 
context__.to_vec(N,K)); + X = Eigen::Matrix(N, K); + vals_r__ = context__.vals_r("X"); + pos__ = 0; + size_t X_j_2_max__ = K; + size_t X_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < X_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_j_1_max__; ++j_1__) { + X(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 26; + context__.validate_dims("data initialization", "M_K", "int", context__.to_vec()); + M_K = int(0); + vals_i__ = context__.vals_i("M_K"); + pos__ = 0; + M_K = vals_i__[pos__++]; + check_greater_or_equal(function__, "M_K", M_K, 0); + current_statement_begin__ = 27; + validate_non_negative_index("M", "N", N); + validate_non_negative_index("M", "M_K", M_K); + validate_non_negative_index("M", "I", I); + context__.validate_dims("data initialization", "M", "matrix_d", context__.to_vec(I,N,M_K)); + M = std::vector >(I, Eigen::Matrix(N, M_K)); + vals_r__ = context__.vals_r("M"); + pos__ = 0; + size_t M_j_2_max__ = M_K; + size_t M_j_1_max__ = N; + size_t M_k_0_max__ = I; + for (size_t j_2__ = 0; j_2__ < M_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < M_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < M_k_0_max__; ++k_0__) { + M[k_0__](j_1__, j_2__) = vals_r__[pos__++]; + } + } + } + current_statement_begin__ = 28; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = context__.vals_i("N_pred"); + pos__ = 0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 29; + validate_non_negative_index("X_pred", "N_pred", N_pred); + validate_non_negative_index("X_pred", "K", K); + context__.validate_dims("data initialization", "X_pred", "matrix_d", context__.to_vec(N_pred,K)); + X_pred = Eigen::Matrix(N_pred, K); + vals_r__ = context__.vals_r("X_pred"); + pos__ = 0; + size_t X_pred_j_2_max__ = K; + size_t X_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < X_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_pred_j_1_max__; ++j_1__) { + X_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 30; + validate_non_negative_index("M_pred", "N_pred", N_pred); + validate_non_negative_index("M_pred", "M_K", M_K); + validate_non_negative_index("M_pred", "I", I); + context__.validate_dims("data initialization", "M_pred", "matrix_d", context__.to_vec(I,N_pred,M_K)); + M_pred = std::vector >(I, Eigen::Matrix(N_pred, M_K)); + vals_r__ = context__.vals_r("M_pred"); + pos__ = 0; + size_t M_pred_j_2_max__ = M_K; + size_t M_pred_j_1_max__ = N_pred; + size_t M_pred_k_0_max__ = I; + for (size_t j_2__ = 0; j_2__ < M_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < M_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < M_pred_k_0_max__; ++k_0__) { + M_pred[k_0__](j_1__, j_2__) = vals_r__[pos__++]; + } + } + } + // initialize transformed data variables + current_statement_begin__ = 35; + validate_non_negative_index("X_std", "N", N); + validate_non_negative_index("X_std", "K", K); + X_std = Eigen::Matrix(N, K); + stan::math::fill(X_std, DUMMY_VAR__); + current_statement_begin__ = 36; + validate_non_negative_index("X_pred_std", "N_pred", N_pred); + validate_non_negative_index("X_pred_std", "K", K); + X_pred_std = Eigen::Matrix(N_pred, K); + stan::math::fill(X_pred_std, DUMMY_VAR__); + current_statement_begin__ = 37; + validate_non_negative_index("y_std", "N", N); + validate_non_negative_index("y_std", "I", I); + y_std = std::vector >(I, Eigen::Matrix(N)); + stan::math::fill(y_std, 
DUMMY_VAR__); + current_statement_begin__ = 38; + validate_non_negative_index("mean_y", "I", I); + mean_y = std::vector(I, double(0)); + stan::math::fill(mean_y, DUMMY_VAR__); + current_statement_begin__ = 39; + validate_non_negative_index("sd_y", "I", I); + sd_y = std::vector(I, double(0)); + stan::math::fill(sd_y, DUMMY_VAR__); + current_statement_begin__ = 40; + validate_non_negative_index("M_std", "N", N); + validate_non_negative_index("M_std", "M_K", M_K); + validate_non_negative_index("M_std", "I", I); + M_std = std::vector >(I, Eigen::Matrix(N, M_K)); + stan::math::fill(M_std, DUMMY_VAR__); + current_statement_begin__ = 41; + validate_non_negative_index("M_pred_std", "N_pred", N_pred); + validate_non_negative_index("M_pred_std", "M_K", M_K); + validate_non_negative_index("M_pred_std", "I", I); + M_pred_std = std::vector >(I, Eigen::Matrix(N_pred, M_K)); + stan::math::fill(M_pred_std, DUMMY_VAR__); + // execute transformed data statements + { + current_statement_begin__ = 43; + validate_non_negative_index("mean_X", "K", K); + Eigen::Matrix mean_X(K); + stan::math::initialize(mean_X, DUMMY_VAR__); + stan::math::fill(mean_X, DUMMY_VAR__); + current_statement_begin__ = 44; + validate_non_negative_index("sd_X", "K", K); + Eigen::Matrix sd_X(K); + stan::math::initialize(sd_X, DUMMY_VAR__); + stan::math::fill(sd_X, DUMMY_VAR__); + current_statement_begin__ = 45; + validate_non_negative_index("mean_M", "M_K", M_K); + validate_non_negative_index("mean_M", "I", I); + std::vector > mean_M(I, Eigen::Matrix(M_K)); + stan::math::initialize(mean_M, DUMMY_VAR__); + stan::math::fill(mean_M, DUMMY_VAR__); + current_statement_begin__ = 46; + validate_non_negative_index("sd_M", "M_K", M_K); + validate_non_negative_index("sd_M", "I", I); + std::vector > sd_M(I, Eigen::Matrix(M_K)); + stan::math::initialize(sd_M, DUMMY_VAR__); + stan::math::fill(sd_M, DUMMY_VAR__); + current_statement_begin__ = 48; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 49; + stan::model::assign(mean_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + mean(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable mean_X"); + current_statement_begin__ = 50; + stan::model::assign(sd_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + sd(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable sd_X"); + current_statement_begin__ = 51; + stan::model::assign(X_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_std"); + current_statement_begin__ = 52; + stan::model::assign(X_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X_pred"), get_base1(mean_X, k, "mean_X", 1)), 
get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_pred_std"); + } + current_statement_begin__ = 55; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 56; + stan::model::assign(mean_y, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + mean(get_base1(y, i, "y", 1)), + "assigning variable mean_y"); + current_statement_begin__ = 57; + stan::model::assign(sd_y, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + sd(get_base1(y, i, "y", 1)), + "assigning variable sd_y"); + current_statement_begin__ = 58; + stan::model::assign(y_std, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + divide(subtract(get_base1(y, i, "y", 1), get_base1(mean_y, i, "mean_y", 1)), get_base1(sd_y, i, "sd_y", 1)), + "assigning variable y_std"); + current_statement_begin__ = 60; + for (int j = 1; j <= M_K; ++j) { + current_statement_begin__ = 61; + stan::model::assign(mean_M, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + mean(stan::model::rvalue(get_base1(M, i, "M", 1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M[i]")), + "assigning variable mean_M"); + current_statement_begin__ = 62; + stan::model::assign(sd_M, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + sd(stan::model::rvalue(get_base1(M, i, "M", 1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M[i]")), + "assigning variable sd_M"); + current_statement_begin__ = 63; + stan::model::assign(M_std, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()))), + divide(subtract(stan::model::rvalue(get_base1(M, i, "M", 1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M[i]"), get_base1(get_base1(mean_M, i, "mean_M", 1), j, "mean_M", 2)), get_base1(get_base1(sd_M, i, "sd_M", 1), j, "sd_M", 2)), + "assigning variable M_std"); + current_statement_begin__ = 64; + stan::model::assign(M_pred_std, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()))), + divide(subtract(stan::model::rvalue(get_base1(M_pred, i, "M_pred", 1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M_pred[i]"), get_base1(get_base1(mean_M, i, "mean_M", 1), j, "mean_M", 2)), get_base1(get_base1(sd_M, i, "sd_M", 1), j, "sd_M", 2)), + "assigning variable M_pred_std"); + } + } + } + // validate transformed data + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 71; + validate_non_negative_index("sigma", "I", I); + num_params_r__ += (1 * I); + current_statement_begin__ = 72; + validate_non_negative_index("beta", "K", K); + validate_non_negative_index("beta", "I", I); + num_params_r__ += ((K - 1) * I); + current_statement_begin__ = 73; + validate_non_negative_index("gamma", "M_K", M_K); + validate_non_negative_index("gamma", "I", I); + num_params_r__ 
+= (M_K * I); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_model6() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 71; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + validate_non_negative_index("sigma", "I", I); + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec(I)); + std::vector sigma(I, double(0)); + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + sigma[k_0__] = vals_r__[pos__++]; + } + size_t sigma_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < sigma_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, sigma[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 72; + if (!(context__.contains_r("beta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta"); + pos__ = 0U; + validate_non_negative_index("beta", "K", K); + validate_non_negative_index("beta", "I", I); + context__.validate_dims("parameter initialization", "beta", "vector_d", context__.to_vec(I,K)); + std::vector > beta(I, Eigen::Matrix(K)); + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + beta[k_0__](j_1__) = vals_r__[pos__++]; + } + } + size_t beta_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < beta_i_0_max__; ++i_0__) { + try { + writer__.simplex_unconstrain(beta[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 73; + if (!(context__.contains_r("gamma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable gamma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("gamma"); + pos__ = 0U; + validate_non_negative_index("gamma", "M_K", M_K); + validate_non_negative_index("gamma", "I", I); + context__.validate_dims("parameter initialization", "gamma", "vector_d", context__.to_vec(I,M_K)); + std::vector > gamma(I, Eigen::Matrix(M_K)); + size_t gamma_j_1_max__ = M_K; + size_t gamma_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < gamma_k_0_max__; ++k_0__) { + gamma[k_0__](j_1__) = vals_r__[pos__++]; + } + } + size_t gamma_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < gamma_i_0_max__; ++i_0__) { + try { + writer__.vector_unconstrain(gamma[i_0__]); + } catch (const 
std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable gamma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 71; + std::vector sigma; + size_t sigma_d_0_max__ = I; + sigma.reserve(sigma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { + if (jacobian__) + sigma.push_back(in__.scalar_lb_constrain(0, lp__)); + else + sigma.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 72; + std::vector > beta; + size_t beta_d_0_max__ = I; + beta.reserve(beta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < beta_d_0_max__; ++d_0__) { + if (jacobian__) + beta.push_back(in__.simplex_constrain(K, lp__)); + else + beta.push_back(in__.simplex_constrain(K)); + } + current_statement_begin__ = 73; + std::vector > gamma; + size_t gamma_d_0_max__ = I; + gamma.reserve(gamma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < gamma_d_0_max__; ++d_0__) { + if (jacobian__) + gamma.push_back(in__.vector_constrain(M_K, lp__)); + else + gamma.push_back(in__.vector_constrain(M_K)); + } + // model body + current_statement_begin__ = 77; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 78; + lp_accum__.add(normal_log(get_base1(sigma, i, "sigma", 1), 0, 1)); + current_statement_begin__ = 79; + lp_accum__.add(normal_log(get_base1(gamma, i, "gamma", 1), 0, 1)); + current_statement_begin__ = 80; + lp_accum__.add(normal_log(get_base1(y_std, i, "y_std", 1), add(multiply(X_std, get_base1(beta, i, "beta", 1)), multiply(get_base1(M_std, i, "M_std", 1), get_base1(gamma, i, "gamma", 1))), get_base1(sigma, i, "sigma", 1))); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("sigma"); + names__.push_back("beta"); + names__.push_back("gamma"); + names__.push_back("y_sim"); + names__.push_back("y_pred"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(I); + dimss__.push_back(dims__); + 
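The model body recovered in log_prob() above is the core of model6: for each unit i, sigma[i] and gamma[i] get standard-normal priors (sigma is additionally constrained positive), and the standardized outcome y_std[i] is modeled as normal with location X_std * beta[i] + M_std[i] * gamma[i], where beta[i] is a simplex of weights over the K columns of X (hence the simplex_constrain calls). A small follow-up sketch, assuming the illustrative fit object from the earlier example, pulls those weights out and checks the simplex constraint numerically:

```r
# Assumes `fit` from the earlier illustrative sketch; not part of the package API.
beta_draws <- rstan::extract(fit, pars = "beta")$beta  # draws x I x K array

# Posterior-mean weight that each unit places on each of the K columns of X;
# every row should sum to 1 because each draw of beta[i] is a simplex.
weights <- apply(beta_draws, c(2, 3), mean)
rowSums(weights)
```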
dims__.resize(0); + dims__.push_back(I); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(M_K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_model6_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + std::vector sigma; + size_t sigma_d_0_max__ = I; + sigma.reserve(sigma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { + sigma.push_back(in__.scalar_lb_constrain(0)); + } + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + vars__.push_back(sigma[k_0__]); + } + std::vector > beta; + size_t beta_d_0_max__ = I; + beta.reserve(beta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < beta_d_0_max__; ++d_0__) { + beta.push_back(in__.simplex_constrain(K)); + } + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + vars__.push_back(beta[k_0__](j_1__)); + } + } + std::vector > gamma; + size_t gamma_d_0_max__ = I; + gamma.reserve(gamma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < gamma_d_0_max__; ++d_0__) { + gamma.push_back(in__.vector_constrain(M_K)); + } + size_t gamma_j_1_max__ = M_K; + size_t gamma_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < gamma_k_0_max__; ++k_0__) { + vars__.push_back(gamma[k_0__](j_1__)); + } + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + if (!include_gqs__ && !include_tparams__) return; + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 86; + validate_non_negative_index("y_sim", "N", N); + validate_non_negative_index("y_sim", "I", I); + std::vector > y_sim(I, Eigen::Matrix(N)); + stan::math::initialize(y_sim, DUMMY_VAR__); + stan::math::fill(y_sim, DUMMY_VAR__); + current_statement_begin__ = 87; + validate_non_negative_index("y_pred", "N_pred", N_pred); + validate_non_negative_index("y_pred", "I", I); + std::vector > y_pred(I, Eigen::Matrix(N_pred)); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 88; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 89; + for (int n = 1; n <= N; ++n) { + current_statement_begin__ = 90; + stan::model::assign(y_sim, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list())), + ((normal_rng((multiply(stan::model::rvalue(X_std, stan::model::cons_list(stan::model::index_uni(n), stan::model::cons_list(stan::model::index_omni(), 
stan::model::nil_index_list())), "X_std"), get_base1(beta, i, "beta", 1)) + multiply(stan::model::rvalue(get_base1(M_std, i, "M_std", 1), stan::model::cons_list(stan::model::index_uni(n), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "M_std[i]"), get_base1(gamma, i, "gamma", 1))), get_base1(sigma, i, "sigma", 1), base_rng__) * get_base1(sd_y, i, "sd_y", 1)) + get_base1(mean_y, i, "mean_y", 1)), + "assigning variable y_sim"); + } + current_statement_begin__ = 93; + for (int j = 1; j <= N_pred; ++j) { + current_statement_begin__ = 94; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + ((normal_rng((multiply(stan::model::rvalue(X_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_pred_std"), get_base1(beta, i, "beta", 1)) + multiply(stan::model::rvalue(get_base1(M_pred_std, i, "M_pred_std", 1), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "M_pred_std[i]"), get_base1(gamma, i, "gamma", 1))), get_base1(sigma, i, "sigma", 1), base_rng__) * get_base1(sd_y, i, "sd_y", 1)) + get_base1(mean_y, i, "mean_y", 1)), + "assigning variable y_pred"); + } + } + // validate, write generated quantities + current_statement_begin__ = 86; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + vars__.push_back(y_sim[k_0__](j_1__)); + } + } + current_statement_begin__ = 87; + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + vars__.push_back(y_pred[k_0__](j_1__)); + } + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_model6"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' 
<< k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t gamma_j_1_max__ = M_K; + size_t gamma_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < gamma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t beta_j_1_max__ = (K - 1); + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t gamma_j_1_max__ = M_K; + size_t gamma_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < gamma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ + 1 << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } +}; // model +} // namespace +typedef model_model6_namespace::model_model6 stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_model7.cc b/src/stanExports_model7.cc new file mode 100644 index 0000000..49724d7 --- /dev/null +++ b/src/stanExports_model7.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. + +#include +using namespace Rcpp ; +#include "stanExports_model7.h" + +RCPP_MODULE(stan_fit4model7_mod) { + + + class_ >("model_model7") + + .constructor() + + + .method("call_sampler", &rstan::stan_fit ::call_sampler) + .method("param_names", &rstan::stan_fit ::param_names) + .method("param_names_oi", &rstan::stan_fit ::param_names_oi) + .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) + .method("param_dims", &rstan::stan_fit ::param_dims) + .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) + .method("update_param_oi", &rstan::stan_fit ::update_param_oi) + .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) + .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) + .method("log_prob", &rstan::stan_fit ::log_prob) + .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) + .method("constrain_pars", &rstan::stan_fit ::constrain_pars) + .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) + .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) + .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) + .method("standalone_gqs", &rstan::stan_fit ::standalone_gqs) + ; +} diff --git a/src/stanExports_model7.h b/src/stanExports_model7.h new file mode 100644 index 0000000..3a3d8f6 --- /dev/null +++ b/src/stanExports_model7.h @@ -0,0 +1,1110 @@ +// Generated by rstantools. Do not edit by hand. 
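Both headers seen so far (model5 and model6) end their write_array() with the same two generated quantities: y_sim, an I x N in-sample posterior replication of each outcome series, and y_pred, the I x N_pred out-of-sample path, each drawn with normal_rng and rescaled to the original units through sd_y and mean_y. A hedged sketch of turning the y_pred draws into a counterfactual summary, again assuming the illustrative fit and stan_data objects from above rather than any function the package exports:

```r
# Illustrative post-processing of the generated quantities; assumes `fit` and `stan_data`.
y_pred_draws <- rstan::extract(fit, pars = "y_pred")$y_pred  # draws x I x N_pred

synthetic <- apply(y_pred_draws, c(2, 3), median)
lower     <- apply(y_pred_draws, c(2, 3), quantile, probs = 0.025)
upper     <- apply(y_pred_draws, c(2, 3), quantile, probs = 0.975)

# Median and 95% credible band of the predicted path for the first unit.
data.frame(
  period    = seq_len(stan_data$N_pred),
  synthetic = synthetic[1, ],
  lower     = lower[1, ],
  upper     = upper[1, ]
)
```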
+ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.21.0 +#include +namespace model_model7_namespace { +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; +static int current_statement_begin__; +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_model7"); + reader.add_event(125, 123, "end", "model_model7"); + return reader; +} +#include +class model_model7 + : public stan::model::model_base_crtp { +private: + int N; + int I; + std::vector y; + int K; + matrix_d X; + int M_K; + std::vector M; + int N_pred; + matrix_d X_pred; + std::vector M_pred; + matrix_d X_std; + matrix_d X_pred_std; + std::vector y_std; + std::vector mean_y; + std::vector sd_y; + std::vector M_std; + std::vector M_pred_std; + std::vector time; + int sumN; +public: + model_model7(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, 0, pstream__); + } + model_model7(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : model_base_crtp(0) { + ctor_body(context__, random_seed__, pstream__); + } + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + current_statement_begin__ = -1; + static const char* function__ = "model_model7_namespace::model_model7"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + try { + // initialize data block variables from context__ + current_statement_begin__ = 21; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 22; + context__.validate_dims("data initialization", "I", "int", context__.to_vec()); + I = int(0); + vals_i__ = context__.vals_i("I"); + pos__ = 0; + I = vals_i__[pos__++]; + check_greater_or_equal(function__, "I", I, 1); + current_statement_begin__ = 23; + validate_non_negative_index("y", "N", N); + validate_non_negative_index("y", "I", I); + context__.validate_dims("data initialization", "y", "vector_d", context__.to_vec(I,N)); + y = std::vector >(I, Eigen::Matrix(N)); + vals_r__ = context__.vals_r("y"); + pos__ = 0; + size_t y_j_1_max__ = N; + size_t y_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_k_0_max__; ++k_0__) { + y[k_0__](j_1__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 24; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 0); + current_statement_begin__ = 25; + validate_non_negative_index("X", "N", N); + validate_non_negative_index("X", "K", K); + context__.validate_dims("data 
initialization", "X", "matrix_d", context__.to_vec(N,K)); + X = Eigen::Matrix(N, K); + vals_r__ = context__.vals_r("X"); + pos__ = 0; + size_t X_j_2_max__ = K; + size_t X_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < X_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_j_1_max__; ++j_1__) { + X(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 26; + context__.validate_dims("data initialization", "M_K", "int", context__.to_vec()); + M_K = int(0); + vals_i__ = context__.vals_i("M_K"); + pos__ = 0; + M_K = vals_i__[pos__++]; + check_greater_or_equal(function__, "M_K", M_K, 0); + current_statement_begin__ = 27; + validate_non_negative_index("M", "N", N); + validate_non_negative_index("M", "M_K", M_K); + validate_non_negative_index("M", "I", I); + context__.validate_dims("data initialization", "M", "matrix_d", context__.to_vec(I,N,M_K)); + M = std::vector >(I, Eigen::Matrix(N, M_K)); + vals_r__ = context__.vals_r("M"); + pos__ = 0; + size_t M_j_2_max__ = M_K; + size_t M_j_1_max__ = N; + size_t M_k_0_max__ = I; + for (size_t j_2__ = 0; j_2__ < M_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < M_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < M_k_0_max__; ++k_0__) { + M[k_0__](j_1__, j_2__) = vals_r__[pos__++]; + } + } + } + current_statement_begin__ = 28; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = context__.vals_i("N_pred"); + pos__ = 0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 29; + validate_non_negative_index("X_pred", "N_pred", N_pred); + validate_non_negative_index("X_pred", "K", K); + context__.validate_dims("data initialization", "X_pred", "matrix_d", context__.to_vec(N_pred,K)); + X_pred = Eigen::Matrix(N_pred, K); + vals_r__ = context__.vals_r("X_pred"); + pos__ = 0; + size_t X_pred_j_2_max__ = K; + size_t X_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < X_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_pred_j_1_max__; ++j_1__) { + X_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 30; + validate_non_negative_index("M_pred", "N_pred", N_pred); + validate_non_negative_index("M_pred", "M_K", M_K); + validate_non_negative_index("M_pred", "I", I); + context__.validate_dims("data initialization", "M_pred", "matrix_d", context__.to_vec(I,N_pred,M_K)); + M_pred = std::vector >(I, Eigen::Matrix(N_pred, M_K)); + vals_r__ = context__.vals_r("M_pred"); + pos__ = 0; + size_t M_pred_j_2_max__ = M_K; + size_t M_pred_j_1_max__ = N_pred; + size_t M_pred_k_0_max__ = I; + for (size_t j_2__ = 0; j_2__ < M_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < M_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < M_pred_k_0_max__; ++k_0__) { + M_pred[k_0__](j_1__, j_2__) = vals_r__[pos__++]; + } + } + } + // initialize transformed data variables + current_statement_begin__ = 35; + validate_non_negative_index("X_std", "N", N); + validate_non_negative_index("X_std", "K", K); + X_std = Eigen::Matrix(N, K); + stan::math::fill(X_std, DUMMY_VAR__); + current_statement_begin__ = 36; + validate_non_negative_index("X_pred_std", "N_pred", N_pred); + validate_non_negative_index("X_pred_std", "K", K); + X_pred_std = Eigen::Matrix(N_pred, K); + stan::math::fill(X_pred_std, DUMMY_VAR__); + current_statement_begin__ = 37; + validate_non_negative_index("y_std", "N", N); + validate_non_negative_index("y_std", "I", I); + y_std = std::vector >(I, Eigen::Matrix(N)); 
+ stan::math::fill(y_std, DUMMY_VAR__); + current_statement_begin__ = 38; + validate_non_negative_index("mean_y", "I", I); + mean_y = std::vector(I, double(0)); + stan::math::fill(mean_y, DUMMY_VAR__); + current_statement_begin__ = 39; + validate_non_negative_index("sd_y", "I", I); + sd_y = std::vector(I, double(0)); + stan::math::fill(sd_y, DUMMY_VAR__); + current_statement_begin__ = 40; + validate_non_negative_index("M_std", "N", N); + validate_non_negative_index("M_std", "M_K", M_K); + validate_non_negative_index("M_std", "I", I); + M_std = std::vector >(I, Eigen::Matrix(N, M_K)); + stan::math::fill(M_std, DUMMY_VAR__); + current_statement_begin__ = 41; + validate_non_negative_index("M_pred_std", "N_pred", N_pred); + validate_non_negative_index("M_pred_std", "M_K", M_K); + validate_non_negative_index("M_pred_std", "I", I); + M_pred_std = std::vector >(I, Eigen::Matrix(N_pred, M_K)); + stan::math::fill(M_pred_std, DUMMY_VAR__); + current_statement_begin__ = 42; + validate_non_negative_index("time", "(N + N_pred)", (N + N_pred)); + time = std::vector((N + N_pred), double(0)); + stan::math::fill(time, DUMMY_VAR__); + current_statement_begin__ = 43; + sumN = int(0); + stan::math::fill(sumN, std::numeric_limits::min()); + stan::math::assign(sumN,(N + N_pred)); + // execute transformed data statements + current_statement_begin__ = 45; + for (int t = 1; t <= sumN; ++t) { + current_statement_begin__ = 46; + stan::model::assign(time, + stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), + t, + "assigning variable time"); + } + { + current_statement_begin__ = 50; + validate_non_negative_index("mean_X", "K", K); + Eigen::Matrix mean_X(K); + stan::math::initialize(mean_X, DUMMY_VAR__); + stan::math::fill(mean_X, DUMMY_VAR__); + current_statement_begin__ = 51; + validate_non_negative_index("sd_X", "K", K); + Eigen::Matrix sd_X(K); + stan::math::initialize(sd_X, DUMMY_VAR__); + stan::math::fill(sd_X, DUMMY_VAR__); + current_statement_begin__ = 52; + validate_non_negative_index("mean_M", "M_K", M_K); + validate_non_negative_index("mean_M", "I", I); + std::vector > mean_M(I, Eigen::Matrix(M_K)); + stan::math::initialize(mean_M, DUMMY_VAR__); + stan::math::fill(mean_M, DUMMY_VAR__); + current_statement_begin__ = 53; + validate_non_negative_index("sd_M", "M_K", M_K); + validate_non_negative_index("sd_M", "I", I); + std::vector > sd_M(I, Eigen::Matrix(M_K)); + stan::math::initialize(sd_M, DUMMY_VAR__); + stan::math::fill(sd_M, DUMMY_VAR__); + current_statement_begin__ = 55; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 56; + stan::model::assign(mean_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + mean(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable mean_X"); + current_statement_begin__ = 57; + stan::model::assign(sd_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + sd(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable sd_X"); + current_statement_begin__ = 58; + stan::model::assign(X_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), 
stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_std"); + current_statement_begin__ = 59; + stan::model::assign(X_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X_pred"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_pred_std"); + } + current_statement_begin__ = 62; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 63; + stan::model::assign(mean_y, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + mean(get_base1(y, i, "y", 1)), + "assigning variable mean_y"); + current_statement_begin__ = 64; + stan::model::assign(sd_y, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + sd(get_base1(y, i, "y", 1)), + "assigning variable sd_y"); + current_statement_begin__ = 65; + stan::model::assign(y_std, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + divide(subtract(get_base1(y, i, "y", 1), get_base1(mean_y, i, "mean_y", 1)), get_base1(sd_y, i, "sd_y", 1)), + "assigning variable y_std"); + current_statement_begin__ = 67; + for (int j = 1; j <= M_K; ++j) { + current_statement_begin__ = 68; + stan::model::assign(mean_M, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + mean(stan::model::rvalue(get_base1(M, i, "M", 1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M[i]")), + "assigning variable mean_M"); + current_statement_begin__ = 69; + stan::model::assign(sd_M, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + sd(stan::model::rvalue(get_base1(M, i, "M", 1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M[i]")), + "assigning variable sd_M"); + current_statement_begin__ = 70; + stan::model::assign(M_std, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()))), + divide(subtract(stan::model::rvalue(get_base1(M, i, "M", 1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M[i]"), get_base1(get_base1(mean_M, i, "mean_M", 1), j, "mean_M", 2)), get_base1(get_base1(sd_M, i, "sd_M", 1), j, "sd_M", 2)), + "assigning variable M_std"); + current_statement_begin__ = 71; + stan::model::assign(M_pred_std, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()))), + divide(subtract(stan::model::rvalue(get_base1(M_pred, i, "M_pred", 1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), "M_pred[i]"), get_base1(get_base1(mean_M, i, "mean_M", 1), j, "mean_M", 2)), get_base1(get_base1(sd_M, i, 
"sd_M", 1), j, "sd_M", 2)), + "assigning variable M_pred_std"); + } + } + } + // validate transformed data + current_statement_begin__ = 43; + check_greater_or_equal(function__, "sumN", sumN, 1); + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 78; + validate_non_negative_index("sigma", "I", I); + num_params_r__ += (1 * I); + current_statement_begin__ = 79; + validate_non_negative_index("beta", "K", K); + validate_non_negative_index("beta", "I", I); + num_params_r__ += ((K - 1) * I); + current_statement_begin__ = 80; + validate_non_negative_index("gamma", "M_K", M_K); + validate_non_negative_index("gamma", "I", I); + num_params_r__ += (M_K * I); + current_statement_begin__ = 81; + validate_non_negative_index("rho", "I", I); + num_params_r__ += (1 * I); + current_statement_begin__ = 82; + validate_non_negative_index("alpha", "I", I); + num_params_r__ += (1 * I); + current_statement_begin__ = 83; + validate_non_negative_index("eta", "sumN", sumN); + validate_non_negative_index("eta", "I", I); + num_params_r__ += (sumN * I); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_model7() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 78; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + validate_non_negative_index("sigma", "I", I); + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec(I)); + std::vector sigma(I, double(0)); + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + sigma[k_0__] = vals_r__[pos__++]; + } + size_t sigma_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < sigma_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, sigma[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 79; + if (!(context__.contains_r("beta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta"); + pos__ = 0U; + validate_non_negative_index("beta", "K", K); + validate_non_negative_index("beta", "I", I); + context__.validate_dims("parameter initialization", "beta", "vector_d", context__.to_vec(I,K)); + std::vector > beta(I, Eigen::Matrix(K)); + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + beta[k_0__](j_1__) = vals_r__[pos__++]; + } + } + size_t beta_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < beta_i_0_max__; ++i_0__) { + try { + writer__.simplex_unconstrain(beta[i_0__]); + } catch (const 
std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 80; + if (!(context__.contains_r("gamma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable gamma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("gamma"); + pos__ = 0U; + validate_non_negative_index("gamma", "M_K", M_K); + validate_non_negative_index("gamma", "I", I); + context__.validate_dims("parameter initialization", "gamma", "vector_d", context__.to_vec(I,M_K)); + std::vector > gamma(I, Eigen::Matrix(M_K)); + size_t gamma_j_1_max__ = M_K; + size_t gamma_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < gamma_k_0_max__; ++k_0__) { + gamma[k_0__](j_1__) = vals_r__[pos__++]; + } + } + size_t gamma_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < gamma_i_0_max__; ++i_0__) { + try { + writer__.vector_unconstrain(gamma[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable gamma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 81; + if (!(context__.contains_r("rho"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable rho missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("rho"); + pos__ = 0U; + validate_non_negative_index("rho", "I", I); + context__.validate_dims("parameter initialization", "rho", "double", context__.to_vec(I)); + std::vector rho(I, double(0)); + size_t rho_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) { + rho[k_0__] = vals_r__[pos__++]; + } + size_t rho_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < rho_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, rho[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable rho: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 82; + if (!(context__.contains_r("alpha"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable alpha missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("alpha"); + pos__ = 0U; + validate_non_negative_index("alpha", "I", I); + context__.validate_dims("parameter initialization", "alpha", "double", context__.to_vec(I)); + std::vector alpha(I, double(0)); + size_t alpha_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < alpha_k_0_max__; ++k_0__) { + alpha[k_0__] = vals_r__[pos__++]; + } + size_t alpha_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < alpha_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, alpha[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable alpha: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 83; + if (!(context__.contains_r("eta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable eta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("eta"); + pos__ = 0U; + validate_non_negative_index("eta", "sumN", sumN); + validate_non_negative_index("eta", "I", I); + context__.validate_dims("parameter initialization", "eta", "vector_d", 
context__.to_vec(I,sumN)); + std::vector > eta(I, Eigen::Matrix(sumN)); + size_t eta_j_1_max__ = sumN; + size_t eta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < eta_k_0_max__; ++k_0__) { + eta[k_0__](j_1__) = vals_r__[pos__++]; + } + } + size_t eta_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < eta_i_0_max__; ++i_0__) { + try { + writer__.vector_unconstrain(eta[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable eta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 78; + std::vector sigma; + size_t sigma_d_0_max__ = I; + sigma.reserve(sigma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { + if (jacobian__) + sigma.push_back(in__.scalar_lb_constrain(0, lp__)); + else + sigma.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 79; + std::vector > beta; + size_t beta_d_0_max__ = I; + beta.reserve(beta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < beta_d_0_max__; ++d_0__) { + if (jacobian__) + beta.push_back(in__.simplex_constrain(K, lp__)); + else + beta.push_back(in__.simplex_constrain(K)); + } + current_statement_begin__ = 80; + std::vector > gamma; + size_t gamma_d_0_max__ = I; + gamma.reserve(gamma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < gamma_d_0_max__; ++d_0__) { + if (jacobian__) + gamma.push_back(in__.vector_constrain(M_K, lp__)); + else + gamma.push_back(in__.vector_constrain(M_K)); + } + current_statement_begin__ = 81; + std::vector rho; + size_t rho_d_0_max__ = I; + rho.reserve(rho_d_0_max__); + for (size_t d_0__ = 0; d_0__ < rho_d_0_max__; ++d_0__) { + if (jacobian__) + rho.push_back(in__.scalar_lb_constrain(0, lp__)); + else + rho.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 82; + std::vector alpha; + size_t alpha_d_0_max__ = I; + alpha.reserve(alpha_d_0_max__); + for (size_t d_0__ = 0; d_0__ < alpha_d_0_max__; ++d_0__) { + if (jacobian__) + alpha.push_back(in__.scalar_lb_constrain(0, lp__)); + else + alpha.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 83; + std::vector > eta; + size_t eta_d_0_max__ = I; + eta.reserve(eta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < eta_d_0_max__; ++d_0__) { + if (jacobian__) + eta.push_back(in__.vector_constrain(sumN, lp__)); + else + eta.push_back(in__.vector_constrain(sumN)); + } + // transformed parameters + current_statement_begin__ = 87; + validate_non_negative_index("f", "sumN", sumN); + validate_non_negative_index("f", "I", I); + std::vector > f(I, Eigen::Matrix(sumN)); + stan::math::initialize(f, 
DUMMY_VAR__); + stan::math::fill(f, DUMMY_VAR__); + // transformed parameters block statements + current_statement_begin__ = 88; + for (int i = 1; i <= I; ++i) { + { + current_statement_begin__ = 89; + validate_non_negative_index("K_matrix", "sumN", sumN); + validate_non_negative_index("K_matrix", "sumN", sumN); + Eigen::Matrix K_matrix(sumN, sumN); + stan::math::initialize(K_matrix, DUMMY_VAR__); + stan::math::fill(K_matrix, DUMMY_VAR__); + stan::math::assign(K_matrix,add(cov_exp_quad(time, get_base1(alpha, i, "alpha", 1), get_base1(rho, i, "rho", 1)), diag_matrix(rep_vector(1e-9, sumN)))); + current_statement_begin__ = 91; + validate_non_negative_index("L_K", "sumN", sumN); + validate_non_negative_index("L_K", "sumN", sumN); + Eigen::Matrix L_K(sumN, sumN); + stan::math::initialize(L_K, DUMMY_VAR__); + stan::math::fill(L_K, DUMMY_VAR__); + stan::math::assign(L_K,cholesky_decompose(K_matrix)); + current_statement_begin__ = 92; + stan::model::assign(f, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + multiply(L_K, get_base1(eta, i, "eta", 1)), + "assigning variable f"); + } + } + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 87; + size_t f_k_0_max__ = I; + size_t f_j_1_max__ = sumN; + for (size_t k_0__ = 0; k_0__ < f_k_0_max__; ++k_0__) { + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(f[k_0__](j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: f" << "[" << k_0__ << "]" << "(" << j_1__ << ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable f: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + } + // model body + current_statement_begin__ = 97; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 98; + lp_accum__.add(normal_log(get_base1(sigma, i, "sigma", 1), 0, 1)); + current_statement_begin__ = 99; + lp_accum__.add(normal_log(get_base1(gamma, i, "gamma", 1), 0, 1)); + current_statement_begin__ = 100; + lp_accum__.add(normal_log(get_base1(rho, i, "rho", 1), 0, 3)); + current_statement_begin__ = 101; + lp_accum__.add(normal_log(get_base1(alpha, i, "alpha", 1), 0, 1)); + current_statement_begin__ = 102; + lp_accum__.add(normal_log(get_base1(eta, i, "eta", 1), 0, 1)); + current_statement_begin__ = 103; + lp_accum__.add(normal_log(get_base1(y_std, i, "y_std", 1), add(add(multiply(X_std, get_base1(beta, i, "beta", 1)), multiply(get_base1(M_std, i, "M_std", 1), get_base1(gamma, i, "gamma", 1))), stan::model::rvalue(get_base1(f, i, "f", 1), stan::model::cons_list(stan::model::index_min_max(1, N), stan::model::nil_index_list()), "f[i]")), get_base1(sigma, i, "sigma", 1))); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + 
names__.resize(0); + names__.push_back("sigma"); + names__.push_back("beta"); + names__.push_back("gamma"); + names__.push_back("rho"); + names__.push_back("alpha"); + names__.push_back("eta"); + names__.push_back("f"); + names__.push_back("y_sim"); + names__.push_back("y_pred"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(I); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(M_K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(sumN); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(sumN); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_model7_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + std::vector sigma; + size_t sigma_d_0_max__ = I; + sigma.reserve(sigma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { + sigma.push_back(in__.scalar_lb_constrain(0)); + } + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + vars__.push_back(sigma[k_0__]); + } + std::vector > beta; + size_t beta_d_0_max__ = I; + beta.reserve(beta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < beta_d_0_max__; ++d_0__) { + beta.push_back(in__.simplex_constrain(K)); + } + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + vars__.push_back(beta[k_0__](j_1__)); + } + } + std::vector > gamma; + size_t gamma_d_0_max__ = I; + gamma.reserve(gamma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < gamma_d_0_max__; ++d_0__) { + gamma.push_back(in__.vector_constrain(M_K)); + } + size_t gamma_j_1_max__ = M_K; + size_t gamma_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < gamma_k_0_max__; ++k_0__) { + vars__.push_back(gamma[k_0__](j_1__)); + } + } + std::vector rho; + size_t rho_d_0_max__ = I; + rho.reserve(rho_d_0_max__); + for (size_t d_0__ = 0; d_0__ < rho_d_0_max__; ++d_0__) { + rho.push_back(in__.scalar_lb_constrain(0)); + } + size_t rho_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) { + vars__.push_back(rho[k_0__]); + } + std::vector alpha; + size_t alpha_d_0_max__ = I; + alpha.reserve(alpha_d_0_max__); + for (size_t d_0__ = 0; d_0__ < alpha_d_0_max__; ++d_0__) { + alpha.push_back(in__.scalar_lb_constrain(0)); + } + size_t alpha_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < alpha_k_0_max__; ++k_0__) { + vars__.push_back(alpha[k_0__]); + } + std::vector > eta; + size_t eta_d_0_max__ = I; + 
eta.reserve(eta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < eta_d_0_max__; ++d_0__) { + eta.push_back(in__.vector_constrain(sumN)); + } + size_t eta_j_1_max__ = sumN; + size_t eta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < eta_k_0_max__; ++k_0__) { + vars__.push_back(eta[k_0__](j_1__)); + } + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + // declare and define transformed parameters + current_statement_begin__ = 87; + validate_non_negative_index("f", "sumN", sumN); + validate_non_negative_index("f", "I", I); + std::vector > f(I, Eigen::Matrix(sumN)); + stan::math::initialize(f, DUMMY_VAR__); + stan::math::fill(f, DUMMY_VAR__); + // do transformed parameters statements + current_statement_begin__ = 88; + for (int i = 1; i <= I; ++i) { + { + current_statement_begin__ = 89; + validate_non_negative_index("K_matrix", "sumN", sumN); + validate_non_negative_index("K_matrix", "sumN", sumN); + Eigen::Matrix K_matrix(sumN, sumN); + stan::math::initialize(K_matrix, DUMMY_VAR__); + stan::math::fill(K_matrix, DUMMY_VAR__); + stan::math::assign(K_matrix,add(cov_exp_quad(time, get_base1(alpha, i, "alpha", 1), get_base1(rho, i, "rho", 1)), diag_matrix(rep_vector(1e-9, sumN)))); + current_statement_begin__ = 91; + validate_non_negative_index("L_K", "sumN", sumN); + validate_non_negative_index("L_K", "sumN", sumN); + Eigen::Matrix L_K(sumN, sumN); + stan::math::initialize(L_K, DUMMY_VAR__); + stan::math::fill(L_K, DUMMY_VAR__); + stan::math::assign(L_K,cholesky_decompose(K_matrix)); + current_statement_begin__ = 92; + stan::model::assign(f, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + multiply(L_K, get_base1(eta, i, "eta", 1)), + "assigning variable f"); + } + } + if (!include_gqs__ && !include_tparams__) return; + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + // write transformed parameters + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + size_t f_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < f_k_0_max__; ++k_0__) { + vars__.push_back(f[k_0__](j_1__)); + } + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 109; + validate_non_negative_index("y_sim", "N", N); + validate_non_negative_index("y_sim", "I", I); + std::vector > y_sim(I, Eigen::Matrix(N)); + stan::math::initialize(y_sim, DUMMY_VAR__); + stan::math::fill(y_sim, DUMMY_VAR__); + current_statement_begin__ = 110; + validate_non_negative_index("y_pred", "N_pred", N_pred); + validate_non_negative_index("y_pred", "I", I); + std::vector > y_pred(I, Eigen::Matrix(N_pred)); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 111; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 112; + for (int n = 1; n <= N; ++n) { + current_statement_begin__ = 113; + stan::model::assign(y_sim, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list())), + 
((normal_rng(((multiply(stan::model::rvalue(X_std, stan::model::cons_list(stan::model::index_uni(n), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_std"), get_base1(beta, i, "beta", 1)) + multiply(stan::model::rvalue(get_base1(M_std, i, "M_std", 1), stan::model::cons_list(stan::model::index_uni(n), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "M_std[i]"), get_base1(gamma, i, "gamma", 1))) + get_base1(get_base1(f, i, "f", 1), n, "f", 2)), get_base1(sigma, i, "sigma", 1), base_rng__) * get_base1(sd_y, i, "sd_y", 1)) + get_base1(mean_y, i, "mean_y", 1)), + "assigning variable y_sim"); + } + current_statement_begin__ = 116; + for (int j = 1; j <= N_pred; ++j) { + current_statement_begin__ = 117; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + ((normal_rng(((multiply(stan::model::rvalue(X_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_pred_std"), get_base1(beta, i, "beta", 1)) + multiply(stan::model::rvalue(get_base1(M_pred_std, i, "M_pred_std", 1), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "M_pred_std[i]"), get_base1(gamma, i, "gamma", 1))) + get_base1(get_base1(f, i, "f", 1), (N + j), "f", 2)), get_base1(sigma, i, "sigma", 1), base_rng__) * get_base1(sd_y, i, "sd_y", 1)) + get_base1(mean_y, i, "mean_y", 1)), + "assigning variable y_pred"); + } + } + // validate, write generated quantities + current_statement_begin__ = 109; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + vars__.push_back(y_sim[k_0__](j_1__)); + } + } + current_statement_begin__ = 110; + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + vars__.push_back(y_pred[k_0__](j_1__)); + } + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_model7"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' 
<< k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t gamma_j_1_max__ = M_K; + size_t gamma_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < gamma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t rho_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "rho" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t alpha_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < alpha_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t eta_j_1_max__ = sumN; + size_t eta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < eta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "eta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + size_t f_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < f_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "f" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t beta_j_1_max__ = (K - 1); + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << k_0__ + 1 << '.' 
<< j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t gamma_j_1_max__ = M_K; + size_t gamma_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < gamma_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < gamma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "gamma" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t rho_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "rho" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t alpha_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < alpha_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t eta_j_1_max__ = sumN; + size_t eta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < eta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "eta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + size_t f_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < f_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "f" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } +}; // model +} // namespace +typedef model_model7_namespace::model_model7 stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/src/stanExports_model8.cc b/src/stanExports_model8.cc new file mode 100644 index 0000000..9a9d908 --- /dev/null +++ b/src/stanExports_model8.cc @@ -0,0 +1,32 @@ +// Generated by rstantools. Do not edit by hand. 
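+// This Rcpp module exposes the compiled model8 fit object to R; rstan uses the
+// exported methods (call_sampler, log_prob, constrain_pars, ...) to drive
+// sampling and parameter transforms for the package's precompiled Stan models.
+// Roughly, the R side would call something like the following (illustrative
+// sketch only; `stan_data` is a placeholder list of the data block inputs):
+//   fit <- rstan::sampling(stanmodels$model8, data = stan_data)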
+
+#include <Rcpp.h>
+using namespace Rcpp ;
+#include "stanExports_model8.h"
+
+RCPP_MODULE(stan_fit4model8_mod) {
+
+
+    class_<rstan::stan_fit<stan_model, boost::random::ecuyer1988> >("model_model8")
+
+    .constructor<SEXP, SEXP, SEXP>()
+
+
+    .method("call_sampler", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::call_sampler)
+    .method("param_names", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_names)
+    .method("param_names_oi", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_names_oi)
+    .method("param_fnames_oi", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_fnames_oi)
+    .method("param_dims", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_dims)
+    .method("param_dims_oi", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_dims_oi)
+    .method("update_param_oi", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::update_param_oi)
+    .method("param_oi_tidx", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::param_oi_tidx)
+    .method("grad_log_prob", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::grad_log_prob)
+    .method("log_prob", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::log_prob)
+    .method("unconstrain_pars", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::unconstrain_pars)
+    .method("constrain_pars", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::constrain_pars)
+    .method("num_pars_unconstrained", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::num_pars_unconstrained)
+    .method("unconstrained_param_names", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::unconstrained_param_names)
+    .method("constrained_param_names", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::constrained_param_names)
+    .method("standalone_gqs", &rstan::stan_fit<stan_model, boost::random::ecuyer1988> ::standalone_gqs)
+    ;
+}
diff --git a/src/stanExports_model8.h b/src/stanExports_model8.h
new file mode 100644
index 0000000..326c58d
--- /dev/null
+++ b/src/stanExports_model8.h
@@ -0,0 +1,939 @@
+// Generated by rstantools. Do not edit by hand.
+
+#ifndef MODELS_HPP
+#define MODELS_HPP
+#define STAN__SERVICES__COMMAND_HPP
+#include <rstan/rstaninc.hpp>
+// Code generated by Stan version 2.21.0
+#include <stan/model/model_header.hpp>
+namespace model_model8_namespace {
+using std::istream;
+using std::string;
+using std::stringstream;
+using std::vector;
+using stan::io::dump;
+using stan::math::lgamma;
+using stan::model::prob_grad;
+using namespace stan::math;
+static int current_statement_begin__;
+stan::io::program_reader prog_reader__() {
+    stan::io::program_reader reader;
+    reader.add_event(0, 0, "start", "model_model8");
+    reader.add_event(107, 105, "end", "model_model8");
+    return reader;
+}
+#include <stan_meta_header.hpp>
+class model_model8
+  : public stan::model::model_base_crtp<model_model8> {
+private:
+        int N;
+        int I;
+        std::vector<vector_d> y;
+        int K;
+        matrix_d X;
+        int N_pred;
+        matrix_d X_pred;
+        matrix_d X_std;
+        matrix_d X_pred_std;
+        std::vector<vector_d> y_std;
+        std::vector<double> mean_y;
+        std::vector<double> sd_y;
+        std::vector<double> time;
+        int sumN;
+public:
+    model_model8(stan::io::var_context& context__,
+        std::ostream* pstream__ = 0)
+        : model_base_crtp(0) {
+        ctor_body(context__, 0, pstream__);
+    }
+    model_model8(stan::io::var_context& context__,
+        unsigned int random_seed__,
+        std::ostream* pstream__ = 0)
+        : model_base_crtp(0) {
+        ctor_body(context__, random_seed__, pstream__);
+    }
+    void ctor_body(stan::io::var_context& context__,
+                   unsigned int random_seed__,
+                   std::ostream* pstream__) {
+        typedef double local_scalar_t__;
+        boost::ecuyer1988 base_rng__ =
+          stan::services::util::create_rng(random_seed__, 0);
+        (void) base_rng__;  // suppress unused var warning
+        current_statement_begin__ = -1;
+        static const char* function__ = "model_model8_namespace::model_model8";
+        (void) function__;  // dummy to suppress unused var warning
+        size_t pos__;
+        (void) pos__;  // dummy to suppress unused var warning
+        std::vector<int> vals_i__;
+        std::vector<double> vals_r__;
+        local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN());
+        (void) DUMMY_VAR__;  // suppress unused var warning
+        try {
+            // initialize data block variables from context__
+            current_statement_begin__ = 21;
+            context__.validate_dims("data initialization", "N", "int",
context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + check_greater_or_equal(function__, "N", N, 1); + current_statement_begin__ = 22; + context__.validate_dims("data initialization", "I", "int", context__.to_vec()); + I = int(0); + vals_i__ = context__.vals_i("I"); + pos__ = 0; + I = vals_i__[pos__++]; + check_greater_or_equal(function__, "I", I, 1); + current_statement_begin__ = 23; + validate_non_negative_index("y", "N", N); + validate_non_negative_index("y", "I", I); + context__.validate_dims("data initialization", "y", "vector_d", context__.to_vec(I,N)); + y = std::vector >(I, Eigen::Matrix(N)); + vals_r__ = context__.vals_r("y"); + pos__ = 0; + size_t y_j_1_max__ = N; + size_t y_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_k_0_max__; ++k_0__) { + y[k_0__](j_1__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 24; + context__.validate_dims("data initialization", "K", "int", context__.to_vec()); + K = int(0); + vals_i__ = context__.vals_i("K"); + pos__ = 0; + K = vals_i__[pos__++]; + check_greater_or_equal(function__, "K", K, 0); + current_statement_begin__ = 25; + validate_non_negative_index("X", "N", N); + validate_non_negative_index("X", "K", K); + context__.validate_dims("data initialization", "X", "matrix_d", context__.to_vec(N,K)); + X = Eigen::Matrix(N, K); + vals_r__ = context__.vals_r("X"); + pos__ = 0; + size_t X_j_2_max__ = K; + size_t X_j_1_max__ = N; + for (size_t j_2__ = 0; j_2__ < X_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_j_1_max__; ++j_1__) { + X(j_1__, j_2__) = vals_r__[pos__++]; + } + } + current_statement_begin__ = 26; + context__.validate_dims("data initialization", "N_pred", "int", context__.to_vec()); + N_pred = int(0); + vals_i__ = context__.vals_i("N_pred"); + pos__ = 0; + N_pred = vals_i__[pos__++]; + check_greater_or_equal(function__, "N_pred", N_pred, 1); + current_statement_begin__ = 27; + validate_non_negative_index("X_pred", "N_pred", N_pred); + validate_non_negative_index("X_pred", "K", K); + context__.validate_dims("data initialization", "X_pred", "matrix_d", context__.to_vec(N_pred,K)); + X_pred = Eigen::Matrix(N_pred, K); + vals_r__ = context__.vals_r("X_pred"); + pos__ = 0; + size_t X_pred_j_2_max__ = K; + size_t X_pred_j_1_max__ = N_pred; + for (size_t j_2__ = 0; j_2__ < X_pred_j_2_max__; ++j_2__) { + for (size_t j_1__ = 0; j_1__ < X_pred_j_1_max__; ++j_1__) { + X_pred(j_1__, j_2__) = vals_r__[pos__++]; + } + } + // initialize transformed data variables + current_statement_begin__ = 31; + validate_non_negative_index("X_std", "N", N); + validate_non_negative_index("X_std", "K", K); + X_std = Eigen::Matrix(N, K); + stan::math::fill(X_std, DUMMY_VAR__); + current_statement_begin__ = 32; + validate_non_negative_index("X_pred_std", "N_pred", N_pred); + validate_non_negative_index("X_pred_std", "K", K); + X_pred_std = Eigen::Matrix(N_pred, K); + stan::math::fill(X_pred_std, DUMMY_VAR__); + current_statement_begin__ = 33; + validate_non_negative_index("y_std", "N", N); + validate_non_negative_index("y_std", "I", I); + y_std = std::vector >(I, Eigen::Matrix(N)); + stan::math::fill(y_std, DUMMY_VAR__); + current_statement_begin__ = 34; + validate_non_negative_index("mean_y", "I", I); + mean_y = std::vector(I, double(0)); + stan::math::fill(mean_y, DUMMY_VAR__); + current_statement_begin__ = 35; + validate_non_negative_index("sd_y", "I", I); + sd_y = std::vector(I, double(0)); + stan::math::fill(sd_y, DUMMY_VAR__); 
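+            // The transformed data block standardizes each covariate column of
+            // X / X_pred and each outcome series y[i] to mean 0 and sd 1;
+            // the generated quantities below rescale draws back onto the
+            // original outcome scale using sd_y[i] and mean_y[i].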
+ current_statement_begin__ = 36; + validate_non_negative_index("time", "(N + N_pred)", (N + N_pred)); + time = std::vector((N + N_pred), double(0)); + stan::math::fill(time, DUMMY_VAR__); + current_statement_begin__ = 37; + sumN = int(0); + stan::math::fill(sumN, std::numeric_limits::min()); + stan::math::assign(sumN,(N + N_pred)); + // execute transformed data statements + current_statement_begin__ = 39; + for (int t = 1; t <= sumN; ++t) { + current_statement_begin__ = 40; + stan::model::assign(time, + stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), + t, + "assigning variable time"); + } + { + current_statement_begin__ = 44; + validate_non_negative_index("mean_X", "K", K); + Eigen::Matrix mean_X(K); + stan::math::initialize(mean_X, DUMMY_VAR__); + stan::math::fill(mean_X, DUMMY_VAR__); + current_statement_begin__ = 45; + validate_non_negative_index("sd_X", "K", K); + Eigen::Matrix sd_X(K); + stan::math::initialize(sd_X, DUMMY_VAR__); + stan::math::fill(sd_X, DUMMY_VAR__); + current_statement_begin__ = 47; + for (int k = 1; k <= K; ++k) { + current_statement_begin__ = 48; + stan::model::assign(mean_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + mean(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable mean_X"); + current_statement_begin__ = 49; + stan::model::assign(sd_X, + stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list()), + sd(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X")), + "assigning variable sd_X"); + current_statement_begin__ = 50; + stan::model::assign(X_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_std"); + current_statement_begin__ = 51; + stan::model::assign(X_pred_std, + stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), + divide(subtract(stan::model::rvalue(X_pred, stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(k), stan::model::nil_index_list())), "X_pred"), get_base1(mean_X, k, "mean_X", 1)), get_base1(sd_X, k, "sd_X", 1)), + "assigning variable X_pred_std"); + } + current_statement_begin__ = 54; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 55; + stan::model::assign(mean_y, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + mean(get_base1(y, i, "y", 1)), + "assigning variable mean_y"); + current_statement_begin__ = 56; + stan::model::assign(sd_y, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + sd(get_base1(y, i, "y", 1)), + "assigning variable sd_y"); + current_statement_begin__ = 57; + stan::model::assign(y_std, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + divide(subtract(get_base1(y, i, "y", 1), get_base1(mean_y, i, "mean_y", 1)), get_base1(sd_y, i, "sd_y", 1)), + "assigning variable y_std"); + } + } + // 
validate transformed data + current_statement_begin__ = 37; + check_greater_or_equal(function__, "sumN", sumN, 1); + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 63; + validate_non_negative_index("sigma", "I", I); + num_params_r__ += (1 * I); + current_statement_begin__ = 64; + validate_non_negative_index("beta", "K", K); + validate_non_negative_index("beta", "I", I); + num_params_r__ += ((K - 1) * I); + current_statement_begin__ = 65; + validate_non_negative_index("rho", "I", I); + num_params_r__ += (1 * I); + current_statement_begin__ = 66; + validate_non_negative_index("alpha", "I", I); + num_params_r__ += (1 * I); + current_statement_begin__ = 67; + validate_non_negative_index("eta", "sumN", sumN); + validate_non_negative_index("eta", "I", I); + num_params_r__ += (sumN * I); + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + ~model_model8() { } + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + typedef double local_scalar_t__; + stan::io::writer writer__(params_r__, params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + current_statement_begin__ = 63; + if (!(context__.contains_r("sigma"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable sigma missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + validate_non_negative_index("sigma", "I", I); + context__.validate_dims("parameter initialization", "sigma", "double", context__.to_vec(I)); + std::vector sigma(I, double(0)); + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + sigma[k_0__] = vals_r__[pos__++]; + } + size_t sigma_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < sigma_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, sigma[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 64; + if (!(context__.contains_r("beta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable beta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("beta"); + pos__ = 0U; + validate_non_negative_index("beta", "K", K); + validate_non_negative_index("beta", "I", I); + context__.validate_dims("parameter initialization", "beta", "vector_d", context__.to_vec(I,K)); + std::vector > beta(I, Eigen::Matrix(K)); + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + beta[k_0__](j_1__) = vals_r__[pos__++]; + } + } + size_t beta_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < beta_i_0_max__; ++i_0__) { + try { + writer__.simplex_unconstrain(beta[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable beta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 65; + if 
(!(context__.contains_r("rho"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable rho missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("rho"); + pos__ = 0U; + validate_non_negative_index("rho", "I", I); + context__.validate_dims("parameter initialization", "rho", "double", context__.to_vec(I)); + std::vector rho(I, double(0)); + size_t rho_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) { + rho[k_0__] = vals_r__[pos__++]; + } + size_t rho_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < rho_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, rho[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable rho: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 66; + if (!(context__.contains_r("alpha"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable alpha missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("alpha"); + pos__ = 0U; + validate_non_negative_index("alpha", "I", I); + context__.validate_dims("parameter initialization", "alpha", "double", context__.to_vec(I)); + std::vector alpha(I, double(0)); + size_t alpha_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < alpha_k_0_max__; ++k_0__) { + alpha[k_0__] = vals_r__[pos__++]; + } + size_t alpha_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < alpha_i_0_max__; ++i_0__) { + try { + writer__.scalar_lb_unconstrain(0, alpha[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable alpha: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + current_statement_begin__ = 67; + if (!(context__.contains_r("eta"))) + stan::lang::rethrow_located(std::runtime_error(std::string("Variable eta missing")), current_statement_begin__, prog_reader__()); + vals_r__ = context__.vals_r("eta"); + pos__ = 0U; + validate_non_negative_index("eta", "sumN", sumN); + validate_non_negative_index("eta", "I", I); + context__.validate_dims("parameter initialization", "eta", "vector_d", context__.to_vec(I,sumN)); + std::vector > eta(I, Eigen::Matrix(sumN)); + size_t eta_j_1_max__ = sumN; + size_t eta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < eta_k_0_max__; ++k_0__) { + eta[k_0__](j_1__) = vals_r__[pos__++]; + } + } + size_t eta_i_0_max__ = I; + for (size_t i_0__ = 0; i_0__ < eta_i_0_max__; ++i_0__) { + try { + writer__.vector_unconstrain(eta[i_0__]); + } catch (const std::exception& e) { + stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable eta: ") + e.what()), current_statement_begin__, prog_reader__()); + } + } + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + template + T__ log_prob(std::vector& params_r__, + std::vector& params_i__, + std::ostream* pstream__ = 0) const { + typedef T__ local_scalar_t__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // dummy to suppress unused 
var warning + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + try { + stan::io::reader in__(params_r__, params_i__); + // model parameters + current_statement_begin__ = 63; + std::vector sigma; + size_t sigma_d_0_max__ = I; + sigma.reserve(sigma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { + if (jacobian__) + sigma.push_back(in__.scalar_lb_constrain(0, lp__)); + else + sigma.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 64; + std::vector > beta; + size_t beta_d_0_max__ = I; + beta.reserve(beta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < beta_d_0_max__; ++d_0__) { + if (jacobian__) + beta.push_back(in__.simplex_constrain(K, lp__)); + else + beta.push_back(in__.simplex_constrain(K)); + } + current_statement_begin__ = 65; + std::vector rho; + size_t rho_d_0_max__ = I; + rho.reserve(rho_d_0_max__); + for (size_t d_0__ = 0; d_0__ < rho_d_0_max__; ++d_0__) { + if (jacobian__) + rho.push_back(in__.scalar_lb_constrain(0, lp__)); + else + rho.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 66; + std::vector alpha; + size_t alpha_d_0_max__ = I; + alpha.reserve(alpha_d_0_max__); + for (size_t d_0__ = 0; d_0__ < alpha_d_0_max__; ++d_0__) { + if (jacobian__) + alpha.push_back(in__.scalar_lb_constrain(0, lp__)); + else + alpha.push_back(in__.scalar_lb_constrain(0)); + } + current_statement_begin__ = 67; + std::vector > eta; + size_t eta_d_0_max__ = I; + eta.reserve(eta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < eta_d_0_max__; ++d_0__) { + if (jacobian__) + eta.push_back(in__.vector_constrain(sumN, lp__)); + else + eta.push_back(in__.vector_constrain(sumN)); + } + // transformed parameters + current_statement_begin__ = 71; + validate_non_negative_index("f", "sumN", sumN); + validate_non_negative_index("f", "I", I); + std::vector > f(I, Eigen::Matrix(sumN)); + stan::math::initialize(f, DUMMY_VAR__); + stan::math::fill(f, DUMMY_VAR__); + // transformed parameters block statements + current_statement_begin__ = 72; + for (int i = 1; i <= I; ++i) { + { + current_statement_begin__ = 73; + validate_non_negative_index("K_matrix", "sumN", sumN); + validate_non_negative_index("K_matrix", "sumN", sumN); + Eigen::Matrix K_matrix(sumN, sumN); + stan::math::initialize(K_matrix, DUMMY_VAR__); + stan::math::fill(K_matrix, DUMMY_VAR__); + stan::math::assign(K_matrix,add(cov_exp_quad(time, get_base1(alpha, i, "alpha", 1), get_base1(rho, i, "rho", 1)), diag_matrix(rep_vector(1e-9, sumN)))); + current_statement_begin__ = 75; + validate_non_negative_index("L_K", "sumN", sumN); + validate_non_negative_index("L_K", "sumN", sumN); + Eigen::Matrix L_K(sumN, sumN); + stan::math::initialize(L_K, DUMMY_VAR__); + stan::math::fill(L_K, DUMMY_VAR__); + stan::math::assign(L_K,cholesky_decompose(K_matrix)); + current_statement_begin__ = 76; + stan::model::assign(f, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + multiply(L_K, get_base1(eta, i, "eta", 1)), + "assigning variable f"); + } + } + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 71; + size_t f_k_0_max__ = I; + size_t f_j_1_max__ = sumN; + for (size_t k_0__ = 0; k_0__ < f_k_0_max__; ++k_0__) { + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + if (stan::math::is_uninitialized(f[k_0__](j_1__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: f" << "[" << k_0__ << "]" << "(" << j_1__ 
<< ")"; + stan::lang::rethrow_located(std::runtime_error(std::string("Error initializing variable f: ") + msg__.str()), current_statement_begin__, prog_reader__()); + } + } + } + // model body + current_statement_begin__ = 81; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 82; + lp_accum__.add(normal_log(get_base1(sigma, i, "sigma", 1), 0, 1)); + current_statement_begin__ = 83; + lp_accum__.add(normal_log(get_base1(rho, i, "rho", 1), 0, 3)); + current_statement_begin__ = 84; + lp_accum__.add(normal_log(get_base1(alpha, i, "alpha", 1), 0, 1)); + current_statement_begin__ = 85; + lp_accum__.add(normal_log(get_base1(eta, i, "eta", 1), 0, 1)); + current_statement_begin__ = 86; + lp_accum__.add(normal_log(get_base1(y_std, i, "y_std", 1), add(multiply(X_std, get_base1(beta, i, "beta", 1)), stan::model::rvalue(get_base1(f, i, "f", 1), stan::model::cons_list(stan::model::index_min_max(1, N), stan::model::nil_index_list()), "f[i]")), get_base1(sigma, i, "sigma", 1))); + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + lp_accum__.add(lp__); + return lp_accum__.sum(); + } // log_prob() + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("sigma"); + names__.push_back("beta"); + names__.push_back("rho"); + names__.push_back("alpha"); + names__.push_back("eta"); + names__.push_back("f"); + names__.push_back("y_sim"); + names__.push_back("y_pred"); + } + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(I); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(K); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(sumN); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(sumN); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(I); + dims__.push_back(N_pred); + dimss__.push_back(dims__); + } + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + vars__.resize(0); + stan::io::reader in__(params_r__, params_i__); + static const char* function__ = "model_model8_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + std::vector sigma; + size_t sigma_d_0_max__ = I; + sigma.reserve(sigma_d_0_max__); + for (size_t d_0__ = 0; d_0__ < sigma_d_0_max__; ++d_0__) { + sigma.push_back(in__.scalar_lb_constrain(0)); + } + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + 
vars__.push_back(sigma[k_0__]); + } + std::vector > beta; + size_t beta_d_0_max__ = I; + beta.reserve(beta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < beta_d_0_max__; ++d_0__) { + beta.push_back(in__.simplex_constrain(K)); + } + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + vars__.push_back(beta[k_0__](j_1__)); + } + } + std::vector rho; + size_t rho_d_0_max__ = I; + rho.reserve(rho_d_0_max__); + for (size_t d_0__ = 0; d_0__ < rho_d_0_max__; ++d_0__) { + rho.push_back(in__.scalar_lb_constrain(0)); + } + size_t rho_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) { + vars__.push_back(rho[k_0__]); + } + std::vector alpha; + size_t alpha_d_0_max__ = I; + alpha.reserve(alpha_d_0_max__); + for (size_t d_0__ = 0; d_0__ < alpha_d_0_max__; ++d_0__) { + alpha.push_back(in__.scalar_lb_constrain(0)); + } + size_t alpha_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < alpha_k_0_max__; ++k_0__) { + vars__.push_back(alpha[k_0__]); + } + std::vector > eta; + size_t eta_d_0_max__ = I; + eta.reserve(eta_d_0_max__); + for (size_t d_0__ = 0; d_0__ < eta_d_0_max__; ++d_0__) { + eta.push_back(in__.vector_constrain(sumN)); + } + size_t eta_j_1_max__ = sumN; + size_t eta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < eta_k_0_max__; ++k_0__) { + vars__.push_back(eta[k_0__](j_1__)); + } + } + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + if (!include_tparams__ && !include_gqs__) return; + try { + // declare and define transformed parameters + current_statement_begin__ = 71; + validate_non_negative_index("f", "sumN", sumN); + validate_non_negative_index("f", "I", I); + std::vector > f(I, Eigen::Matrix(sumN)); + stan::math::initialize(f, DUMMY_VAR__); + stan::math::fill(f, DUMMY_VAR__); + // do transformed parameters statements + current_statement_begin__ = 72; + for (int i = 1; i <= I; ++i) { + { + current_statement_begin__ = 73; + validate_non_negative_index("K_matrix", "sumN", sumN); + validate_non_negative_index("K_matrix", "sumN", sumN); + Eigen::Matrix K_matrix(sumN, sumN); + stan::math::initialize(K_matrix, DUMMY_VAR__); + stan::math::fill(K_matrix, DUMMY_VAR__); + stan::math::assign(K_matrix,add(cov_exp_quad(time, get_base1(alpha, i, "alpha", 1), get_base1(rho, i, "rho", 1)), diag_matrix(rep_vector(1e-9, sumN)))); + current_statement_begin__ = 75; + validate_non_negative_index("L_K", "sumN", sumN); + validate_non_negative_index("L_K", "sumN", sumN); + Eigen::Matrix L_K(sumN, sumN); + stan::math::initialize(L_K, DUMMY_VAR__); + stan::math::fill(L_K, DUMMY_VAR__); + stan::math::assign(L_K,cholesky_decompose(K_matrix)); + current_statement_begin__ = 76; + stan::model::assign(f, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + multiply(L_K, get_base1(eta, i, "eta", 1)), + "assigning variable f"); + } + } + if (!include_gqs__ && !include_tparams__) return; + // validate transformed parameters + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + // write transformed parameters + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + size_t f_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + 
for (size_t k_0__ = 0; k_0__ < f_k_0_max__; ++k_0__) { + vars__.push_back(f[k_0__](j_1__)); + } + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 92; + validate_non_negative_index("y_sim", "N", N); + validate_non_negative_index("y_sim", "I", I); + std::vector > y_sim(I, Eigen::Matrix(N)); + stan::math::initialize(y_sim, DUMMY_VAR__); + stan::math::fill(y_sim, DUMMY_VAR__); + current_statement_begin__ = 93; + validate_non_negative_index("y_pred", "N_pred", N_pred); + validate_non_negative_index("y_pred", "I", I); + std::vector > y_pred(I, Eigen::Matrix(N_pred)); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred, DUMMY_VAR__); + // generated quantities statements + current_statement_begin__ = 94; + for (int i = 1; i <= I; ++i) { + current_statement_begin__ = 95; + for (int n = 1; n <= N; ++n) { + current_statement_begin__ = 96; + stan::model::assign(y_sim, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(n), stan::model::nil_index_list())), + ((normal_rng((multiply(stan::model::rvalue(X_std, stan::model::cons_list(stan::model::index_uni(n), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_std"), get_base1(beta, i, "beta", 1)) + get_base1(get_base1(f, i, "f", 1), n, "f", 2)), get_base1(sigma, i, "sigma", 1), base_rng__) * get_base1(sd_y, i, "sd_y", 1)) + get_base1(mean_y, i, "mean_y", 1)), + "assigning variable y_sim"); + } + current_statement_begin__ = 99; + for (int j = 1; j <= N_pred; ++j) { + current_statement_begin__ = 100; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list())), + ((normal_rng((multiply(stan::model::rvalue(X_pred_std, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "X_pred_std"), get_base1(beta, i, "beta", 1)) + get_base1(get_base1(f, i, "f", 1), (N + j), "f", 2)), get_base1(sigma, i, "sigma", 1), base_rng__) * get_base1(sd_y, i, "sd_y", 1)) + get_base1(mean_y, i, "mean_y", 1)), + "assigning variable y_pred"); + } + } + // validate, write generated quantities + current_statement_begin__ = 92; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + vars__.push_back(y_sim[k_0__](j_1__)); + } + } + current_statement_begin__ = 93; + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + vars__.push_back(y_pred[k_0__](j_1__)); + } + } + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream); + 
vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + std::string model_name() const { + return "model_model8"; + } + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t beta_j_1_max__ = K; + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t rho_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "rho" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t alpha_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < alpha_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t eta_j_1_max__ = sumN; + size_t eta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < eta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "eta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + size_t f_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < f_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "f" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + size_t sigma_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < sigma_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' 
<< k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t beta_j_1_max__ = (K - 1); + size_t beta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < beta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < beta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t rho_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "rho" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t alpha_k_0_max__ = I; + for (size_t k_0__ = 0; k_0__ < alpha_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha" << '.' << k_0__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + size_t eta_j_1_max__ = sumN; + size_t eta_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < eta_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < eta_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "eta" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + if (!include_gqs__ && !include_tparams__) return; + if (include_tparams__) { + size_t f_j_1_max__ = sumN; + size_t f_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < f_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < f_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "f" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } + if (!include_gqs__) return; + size_t y_sim_j_1_max__ = N; + size_t y_sim_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_sim_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_sim_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_sim" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + size_t y_pred_j_1_max__ = N_pred; + size_t y_pred_k_0_max__ = I; + for (size_t j_1__ = 0; j_1__ < y_pred_j_1_max__; ++j_1__) { + for (size_t k_0__ = 0; k_0__ < y_pred_k_0_max__; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ + 1 << '.' << j_1__ + 1; + param_names__.push_back(param_name_stream__.str()); + } + } + } +}; // model +} // namespace +typedef model_model8_namespace::model_model8 stan_model; +#ifndef USING_R +stan::model::model_base& new_model( + stan::io::var_context& data_context, + unsigned int seed, + std::ostream* msg_stream) { + stan_model* m = new stan_model(data_context, seed, msg_stream); + return *m; +} +#endif +#endif diff --git a/vignettes/.gitignore b/vignettes/.gitignore deleted file mode 100644 index 097b241..0000000 --- a/vignettes/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.html -*.R diff --git a/vignettes/germany.Rmd b/vignettes/germany.Rmd deleted file mode 100644 index 951e1de..0000000 --- a/vignettes/germany.Rmd +++ /dev/null @@ -1,284 +0,0 @@ ---- -## Copyright 2021 Google LLC -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. 
-## You may obtain a copy of the License at
-##
-## https://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
----
-title: "Germany Reunification"
-output: rmarkdown::html_vignette
-vignette: >
-  %\VignetteIndexEntry{Germany Reunification}
-  %\VignetteEngine{knitr::rmarkdown}
-  %\VignetteEncoding{UTF-8}
----
-
-```{r, include = FALSE}
-knitr::opts_chunk$set(
-  collapse = TRUE,
-  comment = "#>",
-  echo = TRUE,
-  warning = FALSE,
-  message = FALSE,
-  out.width = '90%',
-  out.height = '600px',
-  fig.align='center'
-)
-set.seed(123)
-```
-
-## Background
-
-In this vignette I show how a simple Bayesian model can be used to generate a synthetic control to estimate the effect of the 1990 German reunification on per-capita GDP in West Germany. Figure 1 (Figure 1a, [Abadie, Diamond, and Hainmueller 2015](https://onlinelibrary.wiley.com/doi/abs/10.1111/ajps.12116)) shows the trajectory of per-capita GDP for West Germany and for a synthetic control generated using the canonical approach described in ([Abadie and Gardeazabal, 2003](https://www.aeaweb.org/articles?id=10.1257/000282803321455188); [Abadie et al., 2010](https://www.aeaweb.org/articles?id=10.1257/000282803321455188)).
-
-
-```{r original_figure, echo = FALSE, fig.cap = "Figure 1: Trends in per Capita GDP: West Germany versus Synthetic West Germany"}
-knitr::include_graphics(path = "https://ignacio.martinez.fyi/synthetic_control/german_reunification.png")
-
-```
-
-## Bayesian Synthetic Control
-
-Let $y^\star_t$ be the standardized per-capita GDP of West Germany in period $t$,
-and $x^\star_t$ be a vector of the standardized per-capita GDP of
-the donor countries in period $t$.
-
-$$
-\begin{aligned}
-y^\star_t &\sim N(x^\star_t\beta , \sigma^2) \\
-\beta &\sim \text{Dir}(1)\\
-\sigma &\sim N^+(0,1)
-\end{aligned}
-$$
-
-Notice that $\beta$ is a simplex. Therefore, the weights are bound to be
-positive and sum to 1. Furthermore, notice that I use a non-informative prior
-for the weights.
-
-
-```{r time_tiles}
-library(dplyr)
-library(ggplot2)
-library(bsynth)
-# Create a new factory
-germany_synth <- bayesianSynth$new(data = germany,
-                                   time = year,
-                                   id = country,
-                                   treated = D,
-                                   outcome = gdp,
-                                   ci_width = 0.95)
-# Visualize the timeline
-germany_synth$timeTiles +
-  ggplot2::xlab("Year") +
-  ggplot2::ylab("Country")
-
-```
-
-```{r results="hide"}
-# Fit the model
-germany_synth$fit()
-
-# Visualize the synthetic control
-germany_synth$synthetic +
-  ggplot2::xlab("Year") +
-  ggplot2::ylab("Per Capita GDP (PPP, 2002 USD)") +
-  ggplot2::scale_y_continuous(labels=scales::dollar_format())
-
-# Visualize the treatment effect
-germany_synth$effect() +
-  ggplot2::xlab("Year") +
-  ggplot2::ylab("Gap in Per Capita GDP (PPP, 2002 USD)") +
-  ggplot2::scale_y_continuous(labels = scales::dollar_format())
-
-```
-
-## GSynth
-
-In this section of the vignette, I use the generalized synthetic control method, which is based on interactive fixed effect models and implemented in [{gsynth}](https://github.com/xuyiqing/gsynth), to provide an additional comparison.
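
Before turning to the gsynth comparison, it may help to see the Dirichlet-weight model from the previous section written out as code. The chunk below is a minimal sketch only: `X_std` (an N x K matrix of standardized pre-treatment donor outcomes) and `y_std` (the standardized pre-treatment treated series) are hypothetical objects, and the Stan program is an illustration of the idea, not the exact model compiled into {bsynth}.

```{r simplex_model_sketch, eval = FALSE}
library(rstan)

# Illustrative Stan program for the simplex-weight model above.
# Not the program shipped with {bsynth}; data objects are hypothetical.
simplex_sc_code <- "
data {
  int<lower=1> N;          // pre-treatment periods
  int<lower=1> K;          // donor units
  matrix[N, K] X;          // standardized donor outcomes
  vector[N] y;             // standardized treated outcome
}
parameters {
  simplex[K] beta;         // weights: positive and sum to one
  real<lower=0> sigma;     // half-normal prior via the lower bound
}
model {
  sigma ~ normal(0, 1);
  // beta has no explicit prior, i.e. it is uniform on the simplex (Dir(1))
  y ~ normal(X * beta, sigma);
}
"

fit <- rstan::stan(model_code = simplex_sc_code,
                   data = list(N = nrow(X_std), K = ncol(X_std),
                               X = X_std, y = y_std))
```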
- -```{r gsynth} - -library(gsynth) - -out <- - gsynth( - gdp ~ D, - data = germany, - index = c("country", "year"), - force = "two-way", - CV = TRUE, - r = c(0, 5), - se = TRUE, - inference = "parametric", - nboots = 10000, - parallel = FALSE, - cores = 4 - ) - -gsynth_plot <- plot(out, - xlab = "Period", - main = "GSynth: German Reunification", - ylab = "Effect") - -``` - -## Side by Side - -```{r side_by_side} - -intervention <- germany %>% - filter(D == 1) %>% - summarise(year = min(year)) %>% - pull(year) - - -bsynth_plot_data <- - germany_synth$plotData %>% - select(x = year, y = tau, ymin = tau_LB, ymax = tau_UB) %>% - mutate(method = "bsynth") - -timeXwalk <- germany %>% - select(year) %>% - distinct() %>% - arrange(year) %>% - mutate(t = 1:n()) - -gsynth_plot_data <- gsynth_plot$data %>% - mutate(t = time + 30) %>% - inner_join(timeXwalk, by = "t") %>% - select(x = year, y = ATT, ymin = CI.lower, ymax = CI.upper) %>% - mutate(method = "GSynth") - -plot_data <- bind_rows(gsynth_plot_data, bsynth_plot_data) - -ggplot(data = plot_data, aes(x = x)) + - geom_line(aes(y = y)) + - geom_ribbon(aes(ymin = ymin, ymax = ymax), - color = "gray", - alpha = 0.2) + - theme_bw(base_size = 14) + - theme( - legend.position = "none", - panel.border = element_blank(), - axis.line = element_line() - ) + - geom_vline(xintercept = intervention, linetype = "dashed") + - facet_grid(cols = dplyr::vars(method)) + - xlab("Year") + - ylab("Gap in Per Capita GDP (PPP, 2002 USD)") + - scale_y_continuous(labels = scales::dollar_format()) - - -``` - -## Placebo - -### GSynth - -```{r} - -placebo_germany <- germany %>% - filter(year < intervention) %>% - mutate(D = case_when( - (country == "West Germany" & - year >= (intervention - lubridate::years(5)) )~ 1, - TRUE ~ 0 - )) - -out <- - gsynth( - gdp ~ D, - data = placebo_germany, - index = c("country", "year"), - force = "two-way", - CV = TRUE, - r = c(0, 5), - se = TRUE, - inference = "parametric", - nboots = 10000, - parallel = FALSE, - cores = 4 - ) - -gsynth_plot <- plot(out, - xlab = "Period", - main = "GSynth: Placebo German Reunification", - ylab = "Effect") -``` - - -### bsynth - -```{r results="hide"} -germany_synth_placebo <- bayesianSynth$new( - data = placebo_germany, - time = year, - id = country, - treated = D, - outcome = gdp, - ci_width = 0.95 -) - -germany_synth_placebo$fit() - -germany_synth_placebo$timeTiles - - -germany_synth_placebo$effect() + - ggplot2::xlab("Year") + - ggplot2::ylab("Gap in Per Capita GDP (PPP, 2002 USD)") + - ggplot2::scale_y_continuous(labels = scales::dollar_format()) - -``` - - -### Side by Side - -```{r} -bsynth_plot_data <- - germany_synth_placebo$plotData %>% - select(x = year, y = tau, ymin = tau_LB, ymax = tau_UB) %>% - mutate(method = "bsynth") - -timeXwalk <- germany %>% - select(year) %>% - distinct() %>% - arrange(year) %>% - mutate(t = 1:n()) - -gsynth_plot_data <- gsynth_plot$data %>% - mutate(t = time + 25) %>% - inner_join(timeXwalk, by = "t") %>% - select(x = year, y = ATT, ymin = CI.lower, ymax = CI.upper) %>% - mutate(method = "GSynth") - -plot_data <- bind_rows(gsynth_plot_data, bsynth_plot_data) - -ggplot(data = plot_data, aes(x = x)) + - geom_line(aes(y = y)) + - geom_ribbon(aes(ymin = ymin, ymax = ymax), - color = "gray", - alpha = 0.2) + - theme_bw(base_size = 14) + - theme( - legend.position = "none", - panel.border = element_blank(), - axis.line = element_line() - ) + - geom_vline(xintercept = intervention - lubridate::years(5), - linetype = "dashed") + - facet_grid(cols = dplyr::vars(method)) + - 
xlab("Year") + - ylab("Gap in Per Capita GDP (PPP, 2002 USD)") + - scale_y_continuous(labels = scales::dollar_format()) -``` - diff --git a/vignettes/multiple_treated.Rmd b/vignettes/multiple_treated.Rmd deleted file mode 100644 index 4240d62..0000000 --- a/vignettes/multiple_treated.Rmd +++ /dev/null @@ -1,99 +0,0 @@ ---- -## Copyright 2021 Google LLC -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## https://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -## See the License for the specific language governing permissions and -## limitations under the License. -## ---- -title: "Multiple Treated" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{Multiple Treated} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>", - echo = TRUE, - warning = FALSE, - message = FALSE, - out.width = '90%', - out.height = '600px', - fig.align='center' -) -set.seed(123) -``` - -## Background - -In this vignette I show how {bsynth} can be used for causal estimation when you - have multiple treated accounts. In particular, I use the synthetic data exaple - from [Xu, Yiqing, 2017](http://dx.doi.org/10.1017/pan.2016.2). In this example, - the treatment starts in period 21 and increases by one each period - (e.g. the effect is 5 period 25 and 10 in period 30). - -```{r original_figure, echo = FALSE, fig.cap = "Figure 1: Xu, Yiqing, 2017", out.height = '400px'} -knitr::include_graphics(path = "https://ignacio.martinez.fyi/synthetic_control/gsynth.png") - -``` - -## Bayesian Synthetic Control with Covariates - -```{r setup, out.height = '800px'} -library(bsynth) -ci_width <- 0.95 -data(gsynth, package = "gsynth") -dplyr::glimpse(simdata) - -outcome_data <- simdata %>% - dplyr::select(time, id, D, Y) - -covariates <- simdata %>% - dplyr::select(time, id, X1, X2) - -synth <- - bsynth::bayesianSynth$new( - data = outcome_data, - time = time, - id = id, - treated = D, - outcome = Y, - ci_width = ci_width, - covariates = covariates - ) - -synth$timeTiles + - ggplot2::theme(text = ggplot2::element_text(size=6)) - -``` - -### Fit - -```{r fit, results="hide"} -synth$fit(cores = 4) -``` - -### Visualize the synthetic controls for each treated unit - -```{r} -synth$synthetic -``` -### Visualize the treatment effect - -```{r} -synth$effect(subset = c("Average"), facet = FALSE) + - ggplot2::scale_y_continuous(breaks=seq(-2,12,2)) -``` - diff --git a/vignettes/prop99.Rmd b/vignettes/prop99.Rmd deleted file mode 100644 index 7b56f43..0000000 --- a/vignettes/prop99.Rmd +++ /dev/null @@ -1,110 +0,0 @@ ---- -## Copyright 2021 Google LLC -## -## Licensed under the Apache License, Version 2.0 (the "License"); -## you may not use this file except in compliance with the License. -## You may obtain a copy of the License at -## -## https://www.apache.org/licenses/LICENSE-2.0 -## -## Unless required by applicable law or agreed to in writing, software -## distributed under the License is distributed on an "AS IS" BASIS, -## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-## See the License for the specific language governing permissions and
-## limitations under the License.
-##
----
-title: "Proposition 99"
-output: rmarkdown::html_vignette
-vignette: >
-  %\VignetteIndexEntry{Proposition 99}
-  %\VignetteEngine{knitr::rmarkdown}
-  %\VignetteEncoding{UTF-8}
----
-
-```{r, include = FALSE}
-knitr::opts_chunk$set(
-  collapse = TRUE,
-  comment = "#>",
-  echo = TRUE,
-  warning = FALSE,
-  message = FALSE,
-  out.width = '90%',
-  out.height = '600px',
-  fig.align='center'
-)
-set.seed(123)
-```
-
-
-```{r time_tiles, out.height = '900px'}
-library(bsynth)
-
-prop99_synth <-
-  bayesianSynth$new(
-    data = prop99_df,
-    time = year,
-    id = state,
-    treated = D,
-    outcome = packs,
-    GP = FALSE,
-    ci_width = 0.95
-  )
-
-prop99_synth$timeTiles +
-  ggplot2::xlab("Year") +
-  ggplot2::ylab("State") +
-  ggplot2::theme(text = ggplot2::element_text(size=12))
-
-
-```
-
-## Fit without Gaussian Process
-
-```{r results="hide"}
-prop99_synth$fit(control = list(adapt_delta = 0.999, max_treedepth = 15),
-                 cores = 4,
-                 refresh = 0,
-                 seed = 1982,
-                 warmup = 4000,
-                 iter = 6000)
-
-prop99_synth$synthetic +
-  ggplot2::xlab("Year") +
-  ggplot2::ylab("Per-Capita Cigarette Sales (in Packs)")
-
-prop99_synth$effect() +
-  ggplot2::xlab("Year") +
-  ggplot2::ylab("Change in Per-Capita Cigarette Sales (in Packs)")
-```
-
-## Fit with Gaussian Process
-
-```{r results="hide"}
-prop99_synth_gp <-
-  bayesianSynth$new(
-    data = prop99_df,
-    time = year,
-    id = state,
-    treated = D,
-    outcome = packs,
-    GP = TRUE,
-    ci_width = 0.95
-  )
-
-prop99_synth_gp$fit(control = list(adapt_delta = 0.999, max_treedepth = 15),
-                    cores = 4,
-                    refresh = 0,
-                    seed = 1982,
-                    warmup = 4000,
-                    iter = 6000)
-
-prop99_synth_gp$synthetic +
-  ggplot2::xlab("Year") +
-  ggplot2::ylab("Per-Capita Cigarette Sales (in Packs)")
-
-prop99_synth_gp$effect() +
-  ggplot2::xlab("Year") +
-  ggplot2::ylab("Change in Per-Capita Cigarette Sales (in Packs)")
-```
-
diff --git a/vignettes/with_covariates.Rmd b/vignettes/with_covariates.Rmd
deleted file mode 100644
index 24c77a1..0000000
--- a/vignettes/with_covariates.Rmd
+++ /dev/null
@@ -1,216 +0,0 @@
----
-## Copyright 2021 Google LLC
-##
-## Licensed under the Apache License, Version 2.0 (the "License");
-## you may not use this file except in compliance with the License.
-## You may obtain a copy of the License at
-##
-## https://www.apache.org/licenses/LICENSE-2.0
-##
-## Unless required by applicable law or agreed to in writing, software
-## distributed under the License is distributed on an "AS IS" BASIS,
-## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-## See the License for the specific language governing permissions and
-## limitations under the License.
-## ---- -title: "With Covariates" -output: rmarkdown::html_vignette -vignette: > - %\VignetteIndexEntry{With Covariates} - %\VignetteEngine{knitr::rmarkdown} - %\VignetteEncoding{UTF-8} ---- - -```{r, include = FALSE} -knitr::opts_chunk$set( - collapse = TRUE, - comment = "#>", - echo = TRUE, - warning = FALSE, - message = FALSE, - out.width = '90%', - out.height = '600px', - fig.align='center' -) -``` - -## Generate Synthetic Data - -```{r} -set.seed(1) -library(dplyr) -library(ggplot2) - -true_lift <- tibble(t = 1:100) %>% - mutate( - x = t - 71, - lift = case_when(x < 0 ~ 0, - TRUE ~ dgamma(x = x, shape = 2, scale = 10)), - lift = 3*lift) %>% - select(-x) - - - - -fake_data <- - tibble(x1 = 100 + arima.sim(model = list(ar = 0.99), n = 100), - x2 = 300 + arima.sim(model = list(ar = 0.57), n = 100), - x3 = 50 + arima.sim(model = list(ar = 0.79, ma = 0.8), n = 100), - m1 = rnorm(n = 100, mean = -2, sd = 1), - m2 = rnorm(n = 100, mean = 0.5, sd = 1), - m3 = rnorm(n = 100, mean = 0.25, sd = 1), - m4 = rnorm(n = 100, mean = 0.15, sd = 1)) %>% - mutate( - y = as.numeric(0.2 * x1 + - 0.3 * x2 + - 0.5 * x3 - - 6.5*m1 - - 3.1*m2 - - 2.3*m3 - - 1.1*m4 + - rnorm(100)), - t = 1:n() - ) %>% - inner_join(true_lift, by = "t") %>% - mutate( - y0 = y, - d = y * lift, - y = y * (1+lift) - ) - -true_lift <- fake_data %>% - select(t, lift, d) - -true_lift_table <- fake_data %>% - filter(t >= 71) %>% - summarise(sum_y0 = sum(y0), - sum_y1 = sum(y), - lift = (sum_y1 - sum_y0)/sum_y0) - -truth <- true_lift_table %>% - pull(lift) - -fake_data <- fake_data%>% - select(-lift, -d, -y0) - -ggplot(data = fake_data, aes(x=t, y=y)) + - geom_line() + - xlab("Time") + - ylab("y") + - theme_bw() + - geom_vline(xintercept = 71, linetype = "dashed") + - ggtitle(label = "Time Series of Interest", - subtitle = "Intervention happens in period 71") - -ggplot(data = true_lift, aes(x=t,y=lift)) + - geom_line() + - xlab("Time") + - ylab("Lift") + - theme_bw() + - scale_y_continuous(labels = scales::percent_format(accuracy = 1)) + - geom_vline(xintercept = 71, linetype = "dashed") + - ggtitle(label = "True Lift", - subtitle = "Intervention happens in period 71") -``` - -* In these synthetic data the true lift between periods 71 and 100 is `r scales::percent(truth, accuracy = 0.1)`. - - -## Simplest Model - - -```{r} -library(bsynth) - -ci_width <- 0.95 - -long_data <- fake_data %>% - select(-m1, -m2, -m3, -m4) %>% - tidyr::pivot_longer(-t, names_to = "id", values_to = "y") %>% - mutate(D = case_when(id == "y" & t >= 71 ~ 1, - TRUE ~ 0)) - -synth <- bsynth::bayesianSynth$new( - data = long_data, - time = t, - id = id, - treated = D, - outcome = y, - ci_width = ci_width -) - - -synth$fit(cores = 4) - -synth$effect() - -synth$synthetic - -synth$liftDraws( - from = 71, - to = 100, - breaks = c(0.01, 0.06, 0.09), - break_names = c("Not worth it", - "Worth it", - "Very worth it", - "Amazing") -) - -point <- synth$summarizeLift() - - -``` -* We estimate a counterfactual lift between periods 71 and 100 of -`r scales::percent(point[[1]], accuracy = 0.1)` with a -`r scales::percent(ci_width, accuracy = 1)` probability that is as low as -`r scales::percent(point[[2]], accuracy = 0.1)` and as high as -`r scales::percent(point[[3]], accuracy = 0.1)`. 
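
The point estimate and interval reported above summarize the posterior distribution of the lift over periods 71 to 100: each posterior draw of the counterfactual series implies one lift draw. The sketch below illustrates that bookkeeping with made-up numbers; `y_obs` and `y_synth_draws` are hypothetical objects, not what {bsynth} stores internally.

```{r lift_draws_sketch}
# Made-up posterior draws, purely to illustrate how a lift summary can be
# computed from counterfactual draws; NOT the objects produced by synth$fit().
set.seed(123)
n_draws <- 1000
horizon <- 30                       # periods 71 to 100
y_obs <- rnorm(horizon, mean = 110, sd = 5)                 # observed series
y_synth_draws <- matrix(rnorm(n_draws * horizon, mean = 100, sd = 5),
                        nrow = n_draws)                     # counterfactual draws

# One lift value per posterior draw: (observed - counterfactual) / counterfactual
lift_draws <- (sum(y_obs) - rowSums(y_synth_draws)) / rowSums(y_synth_draws)

# Posterior median and a 95% credible interval
quantile(lift_draws, probs = c(0.5, 0.025, 0.975))
```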
- - -## With Covariates - -```{r} -covariates <- fake_data %>% - select(t, m1, m2, m3, m4) - -synth <- bsynth::bayesianSynth$new( - data = long_data, - time = t, - id = id, - treated = D, - outcome = y, - ci_width = ci_width, - covariates = covariates -) - - -synth$fit(cores = 4) - -synth$effect() - -synth$synthetic - -synth$liftDraws( - from = 71, - to = 100, - breaks = c(0.01, 0.06, 0.09), - break_names = c("Not worth it", - "Worth it", - "Very worth it", - "Amazing") -) - -point <- synth$summarizeLift() - -``` - -* We estimate a counterfactual lift between periods 71 and 100 of -`r scales::percent(point[[1]], accuracy = 0.1)` with a -`r scales::percent(ci_width, accuracy = 1)` probability that is as low as -`r scales::percent(point[[2]], accuracy = 0.1)` and as high as -`r scales::percent(point[[3]], accuracy = 0.1)`. - - - -
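
The `breaks` and `break_names` arguments passed to `liftDraws()` above appear to partition the posterior lift distribution into named decision regions (three cut points, four labels). Assuming that reading, the implied probabilities can be illustrated with simulated draws; the chunk below is a conceptual sketch, not a reproduction of the plot that method produces.

```{r lift_breaks_sketch}
# Hypothetical posterior lift draws, used only to show how three break points
# and four labels map to decision regions.
set.seed(123)
lift_draws <- rnorm(4000, mean = 0.05, sd = 0.02)

breaks <- c(0.01, 0.06, 0.09)
break_names <- c("Not worth it", "Worth it", "Very worth it", "Amazing")

# Share of posterior draws falling in each region
regions <- cut(lift_draws, breaks = c(-Inf, breaks, Inf), labels = break_names)
round(prop.table(table(regions)), 2)
```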