From 9fc401a257a10cbe682803051d604affae3b1895 Mon Sep 17 00:00:00 2001 From: Hoyoung Doh Date: Mon, 1 Aug 2022 16:42:30 +0900 Subject: [PATCH 1/2] Add three models and an example dataset for the pstRT task --- Python/hbayesdm/models/__init__.py | 6 + Python/hbayesdm/models/_pstRT_ddm.py | 244 +++++ Python/hbayesdm/models/_pstRT_rlddm1.py | 249 +++++ Python/hbayesdm/models/_pstRT_rlddm6.py | 255 +++++ Python/hbayesdm/preprocess_funcs.py | 65 ++ Python/tests/test_pstRT_ddm.py | 12 + Python/tests/test_pstRT_rlddm1.py | 12 + Python/tests/test_pstRT_rlddm6.py | 12 + R/DESCRIPTION | 5 +- R/NAMESPACE | 3 + R/R/preprocess_funcs.R | 55 ++ R/R/pstRT_ddm.R | 51 + R/R/pstRT_rlddm1.R | 56 ++ R/R/pstRT_rlddm6.R | 59 ++ R/inst/plotting/plot_functions.R | 33 + R/man/alt_delta.Rd | 19 - R/man/alt_gamma.Rd | 19 - R/man/bandit2arm_delta.Rd | 19 - R/man/bandit4arm2_kalman_filter.Rd | 19 - R/man/bandit4arm_2par_lapse.Rd | 19 - R/man/bandit4arm_4par.Rd | 19 - R/man/bandit4arm_lapse.Rd | 19 - R/man/bandit4arm_lapse_decay.Rd | 19 - R/man/bandit4arm_singleA_lapse.Rd | 19 - R/man/banditNarm_2par_lapse.Rd | 19 - R/man/banditNarm_4par.Rd | 19 - R/man/banditNarm_delta.Rd | 19 - R/man/banditNarm_kalman_filter.Rd | 19 - R/man/banditNarm_lapse.Rd | 19 - R/man/banditNarm_lapse_decay.Rd | 19 - R/man/banditNarm_singleA_lapse.Rd | 19 - R/man/bart_ewmv.Rd | 19 - R/man/bart_par4.Rd | 19 - R/man/cgt_cm.Rd | 19 - R/man/choiceRT_ddm.Rd | 19 - R/man/choiceRT_ddm_single.Rd | 19 - R/man/cra_exp.Rd | 19 - R/man/cra_linear.Rd | 19 - R/man/dbdm_prob_weight.Rd | 19 - R/man/dd_cs.Rd | 19 - R/man/dd_cs_single.Rd | 19 - R/man/dd_exp.Rd | 19 - R/man/dd_hyperbolic.Rd | 19 - R/man/dd_hyperbolic_single.Rd | 19 - R/man/gng_m1.Rd | 19 - R/man/gng_m2.Rd | 19 - R/man/gng_m3.Rd | 19 - R/man/gng_m4.Rd | 19 - R/man/igt_orl.Rd | 19 - R/man/igt_pvl_decay.Rd | 19 - R/man/igt_pvl_delta.Rd | 19 - R/man/igt_vpp.Rd | 19 - R/man/peer_ocu.Rd | 19 - R/man/prl_ewa.Rd | 19 - R/man/prl_fictitious.Rd | 19 - 
R/man/prl_fictitious_multipleB.Rd | 19 - R/man/prl_fictitious_rp.Rd | 19 - R/man/prl_fictitious_rp_woa.Rd | 19 - R/man/prl_fictitious_woa.Rd | 19 - R/man/prl_rp.Rd | 19 - R/man/prl_rp_multipleB.Rd | 19 - R/man/pstRT_ddm.Rd | 175 ++++ R/man/pstRT_rlddm1.Rd | 175 ++++ R/man/pstRT_rlddm6.Rd | 175 ++++ R/man/pst_Q.Rd | 19 - R/man/pst_gainloss_Q.Rd | 19 - R/man/ra_noLA.Rd | 19 - R/man/ra_noRA.Rd | 19 - R/man/ra_prospect.Rd | 19 - R/man/rdt_happiness.Rd | 19 - R/man/task2AFC_sdt.Rd | 19 - R/man/ts_par4.Rd | 19 - R/man/ts_par6.Rd | 19 - R/man/ts_par7.Rd | 19 - R/man/ug_bayes.Rd | 19 - R/man/ug_delta.Rd | 19 - R/man/wcs_sql.Rd | 19 - R/tests/testthat/test_pstRT_ddm.R | 10 + R/tests/testthat/test_pstRT_rlddm1.R | 10 + R/tests/testthat/test_pstRT_rlddm6.R | 10 + commons/extdata/pstRT_exampleData.txt | 1201 +++++++++++++++++++++++ commons/models/pstRT_ddm.yml | 44 + commons/models/pstRT_rlddm1.yml | 51 + commons/models/pstRT_rlddm6.yml | 58 ++ commons/stan_files/pstRT_ddm.stan | 203 ++++ commons/stan_files/pstRT_rlddm1.stan | 245 +++++ commons/stan_files/pstRT_rlddm6.stan | 281 ++++++ 87 files changed, 3754 insertions(+), 1122 deletions(-) create mode 100644 Python/hbayesdm/models/_pstRT_ddm.py create mode 100644 Python/hbayesdm/models/_pstRT_rlddm1.py create mode 100644 Python/hbayesdm/models/_pstRT_rlddm6.py create mode 100644 Python/tests/test_pstRT_ddm.py create mode 100644 Python/tests/test_pstRT_rlddm1.py create mode 100644 Python/tests/test_pstRT_rlddm6.py create mode 100644 R/R/pstRT_ddm.R create mode 100644 R/R/pstRT_rlddm1.R create mode 100644 R/R/pstRT_rlddm6.R create mode 100644 R/man/pstRT_ddm.Rd create mode 100644 R/man/pstRT_rlddm1.Rd create mode 100644 R/man/pstRT_rlddm6.Rd create mode 100644 R/tests/testthat/test_pstRT_ddm.R create mode 100644 R/tests/testthat/test_pstRT_rlddm1.R create mode 100644 R/tests/testthat/test_pstRT_rlddm6.R create mode 100644 commons/extdata/pstRT_exampleData.txt create mode 100644 commons/models/pstRT_ddm.yml create mode 100644 
commons/models/pstRT_rlddm1.yml create mode 100644 commons/models/pstRT_rlddm6.yml create mode 100644 commons/stan_files/pstRT_ddm.stan create mode 100644 commons/stan_files/pstRT_rlddm1.stan create mode 100644 commons/stan_files/pstRT_rlddm6.stan diff --git a/Python/hbayesdm/models/__init__.py b/Python/hbayesdm/models/__init__.py index ab93def4..601a12d4 100644 --- a/Python/hbayesdm/models/__init__.py +++ b/Python/hbayesdm/models/__init__.py @@ -44,6 +44,9 @@ from ._prl_fictitious_woa import prl_fictitious_woa from ._prl_rp import prl_rp from ._prl_rp_multipleB import prl_rp_multipleB +from ._pstRT_ddm import pstRT_ddm +from ._pstRT_rlddm1 import pstRT_rlddm1 +from ._pstRT_rlddm6 import pstRT_rlddm6 from ._pst_Q import pst_Q from ._pst_gainloss_Q import pst_gainloss_Q from ._ra_noLA import ra_noLA @@ -105,6 +108,9 @@ 'prl_fictitious_woa', 'prl_rp', 'prl_rp_multipleB', + 'pstRT_ddm', + 'pstRT_rlddm1', + 'pstRT_rlddm6', 'pst_Q', 'pst_gainloss_Q', 'ra_noLA', diff --git a/Python/hbayesdm/models/_pstRT_ddm.py b/Python/hbayesdm/models/_pstRT_ddm.py new file mode 100644 index 00000000..7d2796a1 --- /dev/null +++ b/Python/hbayesdm/models/_pstRT_ddm.py @@ -0,0 +1,244 @@ +from typing import Sequence, Union, Any +from collections import OrderedDict + +from numpy import Inf, exp +import pandas as pd + +from hbayesdm.base import TaskModel +from hbayesdm.preprocess_funcs import pstRT_preprocess_func + +__all__ = ['pstRT_ddm'] + + +class PstrtDdm(TaskModel): + def __init__(self, **kwargs): + super().__init__( + task_name='pstRT', + model_name='ddm', + model_type='', + data_columns=( + 'subjID', + 'cond', + 'choice', + 'RT', + ), + parameters=OrderedDict([ + ('a', (0, 1.8, Inf)), + ('tau', (0, 0.3, Inf)), + ('d1', (-Inf, 0.8, Inf)), + ('d2', (-Inf, 0.4, Inf)), + ('d3', (-Inf, 0.3, Inf)), + ]), + regressors=OrderedDict([ + + ]), + postpreds=['choice_os', 'RT_os'], + parameters_desc=OrderedDict([ + ('a', 'boundary separation'), + ('tau', 'non-decision time'), + ('d1', 'drift rate 
scaling'), + ('d2', 'drift rate scaling'), + ('d3', 'drift rate scaling'), + ]), + additional_args_desc=OrderedDict([ + ('RTbound', 0.1), + ]), + **kwargs, + ) + + _preprocess_func = pstRT_preprocess_func + + +def pstRT_ddm( + data: Union[pd.DataFrame, str, None] = None, + niter: int = 4000, + nwarmup: int = 1000, + nchain: int = 4, + ncore: int = 1, + nthin: int = 1, + inits: Union[str, Sequence[float]] = 'vb', + ind_pars: str = 'mean', + model_regressor: bool = False, + vb: bool = False, + inc_postpred: bool = False, + adapt_delta: float = 0.95, + stepsize: float = 1, + max_treedepth: int = 10, + **additional_args: Any) -> TaskModel: + """Probabilistic Selection Task (with RT data) - Drift Diffusion Model + + Hierarchical Bayesian Modeling of the Probabilistic Selection Task (with RT data) [Frank2007]_, [Frank2004]_ + using Drift Diffusion Model [Pedersen2017]_ with the following parameters: + "a" (boundary separation), "tau" (non-decision time), "d1" (drift rate scaling), "d2" (drift rate scaling), "d3" (drift rate scaling). + + + + .. [Frank2007] Frank, M. J., Santamaria, A., O'Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599. + .. [Frank2004] Frank, M. J., Seeberger, L. C., & O'reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943. + .. [Pedersen2017] Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. + + + + User data should contain the behavioral data-set of all subjects of interest for + the current analysis. When loading from a file, the datafile should be a + **tab-delimited** text file, whose rows represent trial-by-trial observations + and columns represent variables. 
+ + For the Probabilistic Selection Task (with RT data), there should be 4 columns of data + with the labels "subjID", "cond", "choice", "RT". It is not necessary for the columns to be + in this particular order; however, it is necessary that they be labeled + correctly and contain the information below: + + - "subjID": A unique identifier for each subject in the data-set. + - "cond": Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3). + - "choice": Integer value representing the option chosen on the given trial (1 or 2). + - "RT": Float value representing the time taken for the response on the given trial. + + .. note:: + User data may contain other columns of data (e.g. ``ReactionTime``, + ``trial_number``, etc.), but only the data within the column names listed + above will be used during the modeling. As long as the necessary columns + mentioned above are present and labeled correctly, there is no need to + remove other miscellaneous data columns. + + .. note:: + + ``adapt_delta``, ``stepsize``, and ``max_treedepth`` are advanced options that + give the user more control over Stan's MCMC sampler. It is recommended that + only advanced users change the default values, as alterations can profoundly + change the sampler's behavior. See [Hoffman2014]_ for more information on the + sampler control parameters. One can also refer to 'Section 34.2. HMC Algorithm + Parameters' of the `Stan User's Guide and Reference Manual`__. + + .. [Hoffman2014] + Hoffman, M. D., & Gelman, A. (2014). + The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. + Journal of Machine Learning Research, 15(1), 1593-1623. + + __ https://mc-stan.org/users/documentation/ + + Parameters + ---------- + data + Data to be modeled. It should be given as a Pandas DataFrame object, + a filepath for a data file, or ``"example"`` for example data. + Data columns should be labeled as: "subjID", "cond", "choice", "RT". 
+ niter + Number of iterations, including warm-up. Defaults to 4000. + nwarmup + Number of iterations used for warm-up only. Defaults to 1000. + + ``nwarmup`` is a numerical value that specifies how many MCMC samples + should not be stored upon the beginning of each chain. For those + familiar with Bayesian methods, this is equivalent to burn-in samples. + Due to the nature of the MCMC algorithm, initial values (i.e., where the + sampling chains begin) can have a heavy influence on the generated + posterior distributions. The ``nwarmup`` argument can be set to a + higher number in order to curb the effects that initial values have on + the resulting posteriors. + nchain + Number of Markov chains to run. Defaults to 4. + + ``nchain`` is a numerical value that specifies how many chains (i.e., + independent sampling sequences) should be used to draw samples from + the posterior distribution. Since the posteriors are generated from a + sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the + sampling is complete, it is possible to check the multiple chains for + convergence by running the following line of code: + + .. code:: python + + output.plot(type='trace') + ncore + Number of CPUs to be used for running. Defaults to 1. + nthin + Every ``nthin``-th sample will be used to generate the posterior + distribution. Defaults to 1. A higher number can be used when + auto-correlation within the MCMC sampling is high. + + ``nthin`` is a numerical value that specifies the "skipping" behavior + of the MCMC sampler. That is, only every ``nthin``-th sample is used to + generate posterior distributions. By default, ``nthin`` is equal to 1, + meaning that every sample is used to generate the posterior. + inits + String or list specifying how the initial values should be generated. + Options are ``'fixed'`` or ``'random'``, or your own initial values. 
+ ind_pars + String specifying how to summarize the individual parameters. + Current options are: ``'mean'``, ``'median'``, or ``'mode'``. + model_regressor + Whether to export model-based regressors. Currently not available for this model. + vb + Whether to use variational inference to approximately draw from a + posterior distribution. Defaults to ``False``. + inc_postpred + Include trial-level posterior predictive simulations in + model output (may greatly increase file size). Defaults to ``False``. + adapt_delta + Floating point value representing the target acceptance probability of a new + sample in the MCMC chain. Must be between 0 and 1. See note below. + stepsize + Integer value specifying the size of each leapfrog step that the MCMC sampler + can take on each new iteration. See note below. + max_treedepth + Integer value specifying how many leapfrog steps the MCMC sampler can take + on each new iteration. See note below. + **additional_args + For this model, it's possible to set the following model-specific argument to a value that you may prefer. + + - ``RTbound``: Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds). + + Returns + ------- + model_data + An ``hbayesdm.TaskModel`` instance with the following components: + + - ``model``: String value that is the name of the model ('pstRT_ddm'). + - ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values + (as specified by ``ind_pars``) for each subject. + - ``par_vals``: OrderedDict holding the posterior samples over different parameters. + - ``fit``: A PyStan StanFit object that contains the fitted Stan model. + - ``raw_data``: Pandas DataFrame containing the raw data used to fit the model, + as specified by the user. + + + Examples + -------- + + .. 
code:: python + + from hbayesdm import rhat, print_fit + from hbayesdm.models import pstRT_ddm + + # Run the model and store results in "output" + output = pstRT_ddm(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4) + + # Visually check convergence of the sampling chains (should look like "hairy caterpillars") + output.plot(type='trace') + + # Plot posterior distributions of the hyper-parameters (distributions should be unimodal) + output.plot() + + # Check Rhat values (all Rhat values should be less than or equal to 1.1) + rhat(output, less=1.1) + + # Show the LOOIC and WAIC model fit estimates + print_fit(output) + """ + return PstrtDdm( + data=data, + niter=niter, + nwarmup=nwarmup, + nchain=nchain, + ncore=ncore, + nthin=nthin, + inits=inits, + ind_pars=ind_pars, + model_regressor=model_regressor, + vb=vb, + inc_postpred=inc_postpred, + adapt_delta=adapt_delta, + stepsize=stepsize, + max_treedepth=max_treedepth, + **additional_args) diff --git a/Python/hbayesdm/models/_pstRT_rlddm1.py b/Python/hbayesdm/models/_pstRT_rlddm1.py new file mode 100644 index 00000000..b64e915b --- /dev/null +++ b/Python/hbayesdm/models/_pstRT_rlddm1.py @@ -0,0 +1,249 @@ +from typing import Sequence, Union, Any +from collections import OrderedDict + +from numpy import Inf, exp +import pandas as pd + +from hbayesdm.base import TaskModel +from hbayesdm.preprocess_funcs import pstRT_preprocess_func + +__all__ = ['pstRT_rlddm1'] + + +class PstrtRlddm1(TaskModel): + def __init__(self, **kwargs): + super().__init__( + task_name='pstRT', + model_name='rlddm1', + model_type='', + data_columns=( + 'subjID', + 'cond', + 'prob', + 'choice', + 'RT', + 'feedback', + ), + parameters=OrderedDict([ + ('a', (0, 1.8, Inf)), + ('tau', (0, 0.3, Inf)), + ('v', (-Inf, 4.5, Inf)), + ('alpha', (0, 0.02, 1)), + ]), + regressors=OrderedDict([ + ('Q1', 2), + ('Q2', 2), + ]), + postpreds=['choice_os', 'RT_os', 'choice_sm', 'RT_sm', 'fd_sm'], + parameters_desc=OrderedDict([ + ('a', 'boundary 
separation'), + ('tau', 'non-decision time'), + ('v', 'drift rate scaling'), + ('alpha', 'learning rate'), + ]), + additional_args_desc=OrderedDict([ + ('RTbound', 0.1), + ('initQ', 0.5), + ]), + **kwargs, + ) + + _preprocess_func = pstRT_preprocess_func + + +def pstRT_rlddm1( + data: Union[pd.DataFrame, str, None] = None, + niter: int = 4000, + nwarmup: int = 1000, + nchain: int = 4, + ncore: int = 1, + nthin: int = 1, + inits: Union[str, Sequence[float]] = 'vb', + ind_pars: str = 'mean', + model_regressor: bool = False, + vb: bool = False, + inc_postpred: bool = False, + adapt_delta: float = 0.95, + stepsize: float = 1, + max_treedepth: int = 10, + **additional_args: Any) -> TaskModel: + """Probabilistic Selection Task (with RT data) - Reinforcement Learning Drift Diffusion Model 1 + + Hierarchical Bayesian Modeling of the Probabilistic Selection Task (with RT data) [Frank2007]_, [Frank2004]_ + using Reinforcement Learning Drift Diffusion Model 1 [Pedersen2017]_ with the following parameters: + "a" (boundary separation), "tau" (non-decision time), "v" (drift rate scaling), "alpha" (learning rate). + + + + .. [Frank2007] Frank, M. J., Santamaria, A., O'Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599. + .. [Frank2004] Frank, M. J., Seeberger, L. C., & O'reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943. + .. [Pedersen2017] Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. + + + + User data should contain the behavioral data-set of all subjects of interest for + the current analysis. 
When loading from a file, the datafile should be a + **tab-delimited** text file, whose rows represent trial-by-trial observations + and columns represent variables. + + For the Probabilistic Selection Task (with RT data), there should be 6 columns of data + with the labels "subjID", "cond", "prob", "choice", "RT", "feedback". It is not necessary for the columns to be + in this particular order; however, it is necessary that they be labeled + correctly and contain the information below: + + - "subjID": A unique identifier for each subject in the data-set. + - "cond": Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3). + - "prob": Float value representing the probability that a correct response (1) is rewarded in the current task condition. + - "choice": Integer value representing the option chosen on the given trial (1 or 2). + - "RT": Float value representing the time taken for the response on the given trial. + - "feedback": Integer value representing the outcome of the given trial (where 'correct' == 1, and 'incorrect' == 0). + + .. note:: + User data may contain other columns of data (e.g. ``ReactionTime``, + ``trial_number``, etc.), but only the data within the column names listed + above will be used during the modeling. As long as the necessary columns + mentioned above are present and labeled correctly, there is no need to + remove other miscellaneous data columns. + + .. note:: + + ``adapt_delta``, ``stepsize``, and ``max_treedepth`` are advanced options that + give the user more control over Stan's MCMC sampler. It is recommended that + only advanced users change the default values, as alterations can profoundly + change the sampler's behavior. See [Hoffman2014]_ for more information on the + sampler control parameters. One can also refer to 'Section 34.2. HMC Algorithm + Parameters' of the `Stan User's Guide and Reference Manual`__. + + .. [Hoffman2014] + Hoffman, M. D., & Gelman, A. (2014). 
+ The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. + Journal of Machine Learning Research, 15(1), 1593-1623. + + __ https://mc-stan.org/users/documentation/ + + Parameters + ---------- + data + Data to be modeled. It should be given as a Pandas DataFrame object, + a filepath for a data file, or ``"example"`` for example data. + Data columns should be labeled as: "subjID", "cond", "prob", "choice", "RT", "feedback". + niter + Number of iterations, including warm-up. Defaults to 4000. + nwarmup + Number of iterations used for warm-up only. Defaults to 1000. + + ``nwarmup`` is a numerical value that specifies how many MCMC samples + should not be stored upon the beginning of each chain. For those + familiar with Bayesian methods, this is equivalent to burn-in samples. + Due to the nature of the MCMC algorithm, initial values (i.e., where the + sampling chains begin) can have a heavy influence on the generated + posterior distributions. The ``nwarmup`` argument can be set to a + higher number in order to curb the effects that initial values have on + the resulting posteriors. + nchain + Number of Markov chains to run. Defaults to 4. + + ``nchain`` is a numerical value that specifies how many chains (i.e., + independent sampling sequences) should be used to draw samples from + the posterior distribution. Since the posteriors are generated from a + sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the + sampling is complete, it is possible to check the multiple chains for + convergence by running the following line of code: + + .. code:: python + + output.plot(type='trace') + ncore + Number of CPUs to be used for running. Defaults to 1. + nthin + Every ``nthin``-th sample will be used to generate the posterior + distribution. Defaults to 1. A higher number can be used when + auto-correlation within the MCMC sampling is high. 
+ + ``nthin`` is a numerical value that specifies the "skipping" behavior + of the MCMC sampler. That is, only every ``nthin``-th sample is used to + generate posterior distributions. By default, ``nthin`` is equal to 1, + meaning that every sample is used to generate the posterior. + inits + String or list specifying how the initial values should be generated. + Options are ``'fixed'`` or ``'random'``, or your own initial values. + ind_pars + String specifying how to summarize the individual parameters. + Current options are: ``'mean'``, ``'median'``, or ``'mode'``. + model_regressor + Whether to export model-based regressors. For this model they are: "Q1", "Q2". + vb + Whether to use variational inference to approximately draw from a + posterior distribution. Defaults to ``False``. + inc_postpred + Include trial-level posterior predictive simulations in + model output (may greatly increase file size). Defaults to ``False``. + adapt_delta + Floating point value representing the target acceptance probability of a new + sample in the MCMC chain. Must be between 0 and 1. See note below. + stepsize + Integer value specifying the size of each leapfrog step that the MCMC sampler + can take on each new iteration. See note below. + max_treedepth + Integer value specifying how many leapfrog steps the MCMC sampler can take + on each new iteration. See note below. + **additional_args + For this model, it's possible to set the following model-specific argument to a value that you may prefer. + + - ``RTbound``: Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds). + - ``initQ``: Floating point value representing the model's initial value of any choice. + + Returns + ------- + model_data + An ``hbayesdm.TaskModel`` instance with the following components: + + - ``model``: String value that is the name of the model ('pstRT_rlddm1'). 
+ - ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values + (as specified by ``ind_pars``) for each subject. + - ``par_vals``: OrderedDict holding the posterior samples over different parameters. + - ``fit``: A PyStan StanFit object that contains the fitted Stan model. + - ``raw_data``: Pandas DataFrame containing the raw data used to fit the model, + as specified by the user. + - ``model_regressor``: Dict holding the extracted model-based regressors. + + Examples + -------- + + .. code:: python + + from hbayesdm import rhat, print_fit + from hbayesdm.models import pstRT_rlddm1 + + # Run the model and store results in "output" + output = pstRT_rlddm1(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4) + + # Visually check convergence of the sampling chains (should look like "hairy caterpillars") + output.plot(type='trace') + + # Plot posterior distributions of the hyper-parameters (distributions should be unimodal) + output.plot() + + # Check Rhat values (all Rhat values should be less than or equal to 1.1) + rhat(output, less=1.1) + + # Show the LOOIC and WAIC model fit estimates + print_fit(output) + """ + return PstrtRlddm1( + data=data, + niter=niter, + nwarmup=nwarmup, + nchain=nchain, + ncore=ncore, + nthin=nthin, + inits=inits, + ind_pars=ind_pars, + model_regressor=model_regressor, + vb=vb, + inc_postpred=inc_postpred, + adapt_delta=adapt_delta, + stepsize=stepsize, + max_treedepth=max_treedepth, + **additional_args) diff --git a/Python/hbayesdm/models/_pstRT_rlddm6.py b/Python/hbayesdm/models/_pstRT_rlddm6.py new file mode 100644 index 00000000..92bffadc --- /dev/null +++ b/Python/hbayesdm/models/_pstRT_rlddm6.py @@ -0,0 +1,255 @@ +from typing import Sequence, Union, Any +from collections import OrderedDict + +from numpy import Inf, exp +import pandas as pd + +from hbayesdm.base import TaskModel +from hbayesdm.preprocess_funcs import pstRT_preprocess_func + +__all__ = ['pstRT_rlddm6'] + + +class PstrtRlddm6(TaskModel): + 
def __init__(self, **kwargs): + super().__init__( + task_name='pstRT', + model_name='rlddm6', + model_type='', + data_columns=( + 'subjID', + 'iter', + 'cond', + 'prob', + 'choice', + 'RT', + 'feedback', + ), + parameters=OrderedDict([ + ('a', (0, 1.6, Inf)), + ('bp', (-0.3, 0.02, 0.3)), + ('tau', (0, 0.2, Inf)), + ('v', (-Inf, 2.8, Inf)), + ('alpha_pos', (0, 0.04, 1)), + ('alpha_neg', (0, 0.02, 1)), + ]), + regressors=OrderedDict([ + ('Q1', 2), + ('Q2', 2), + ]), + postpreds=['choice_os', 'RT_os', 'choice_sm', 'RT_sm', 'fd_sm'], + parameters_desc=OrderedDict([ + ('a', 'boundary separation'), + ('bp', 'boundary separation power'), + ('tau', 'non-decision time'), + ('v', 'drift rate scaling'), + ('alpha_pos', 'learning rate for positive prediction error'), + ('alpha_neg', 'learning rate for negative prediction error'), + ]), + additional_args_desc=OrderedDict([ + ('RTbound', 0.1), + ('initQ', 0.5), + ]), + **kwargs, + ) + + _preprocess_func = pstRT_preprocess_func + + +def pstRT_rlddm6( + data: Union[pd.DataFrame, str, None] = None, + niter: int = 4000, + nwarmup: int = 1000, + nchain: int = 4, + ncore: int = 1, + nthin: int = 1, + inits: Union[str, Sequence[float]] = 'vb', + ind_pars: str = 'mean', + model_regressor: bool = False, + vb: bool = False, + inc_postpred: bool = False, + adapt_delta: float = 0.95, + stepsize: float = 1, + max_treedepth: int = 10, + **additional_args: Any) -> TaskModel: + """Probabilistic Selection Task (with RT data) - Reinforcement Learning Drift Diffusion Model 6 + + Hierarchical Bayesian Modeling of the Probabilistic Selection Task (with RT data) [Frank2007]_, [Frank2004]_ + using Reinforcement Learning Drift Diffusion Model 6 [Pedersen2017]_ with the following parameters: + "a" (boundary separation), "bp" (boundary separation power), "tau" (non-decision time), "v" (drift rate scaling), "alpha_pos" (learning rate for positive prediction error), "alpha_neg" (learning rate for negative prediction error). + + + + .. [Frank2007] Frank, M. 
J., Santamaria, A., O'Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599. + .. [Frank2004] Frank, M. J., Seeberger, L. C., & O'reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943. + .. [Pedersen2017] Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. + + + + User data should contain the behavioral data-set of all subjects of interest for + the current analysis. When loading from a file, the datafile should be a + **tab-delimited** text file, whose rows represent trial-by-trial observations + and columns represent variables. + + For the Probabilistic Selection Task (with RT data), there should be 7 columns of data + with the labels "subjID", "iter", "cond", "prob", "choice", "RT", "feedback". It is not necessary for the columns to be + in this particular order; however, it is necessary that they be labeled + correctly and contain the information below: + + - "subjID": A unique identifier for each subject in the data-set. + - "iter": Integer value representing the trial number for each task condition. + - "cond": Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3). + - "prob": Float value representing the probability that a correct response (1) is rewarded in the current task condition. + - "choice": Integer value representing the option chosen on the given trial (1 or 2). + - "RT": Float value representing the time taken for the response on the given trial. + - "feedback": Integer value representing the outcome of the given trial (where 'correct' == 1, and 'incorrect' == 0). + + .. note:: + User data may contain other columns of data (e.g. 
``ReactionTime``, + ``trial_number``, etc.), but only the data within the column names listed + above will be used during the modeling. As long as the necessary columns + mentioned above are present and labeled correctly, there is no need to + remove other miscellaneous data columns. + + .. note:: + + ``adapt_delta``, ``stepsize``, and ``max_treedepth`` are advanced options that + give the user more control over Stan's MCMC sampler. It is recommended that + only advanced users change the default values, as alterations can profoundly + change the sampler's behavior. See [Hoffman2014]_ for more information on the + sampler control parameters. One can also refer to 'Section 34.2. HMC Algorithm + Parameters' of the `Stan User's Guide and Reference Manual`__. + + .. [Hoffman2014] + Hoffman, M. D., & Gelman, A. (2014). + The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. + Journal of Machine Learning Research, 15(1), 1593-1623. + + __ https://mc-stan.org/users/documentation/ + + Parameters + ---------- + data + Data to be modeled. It should be given as a Pandas DataFrame object, + a filepath for a data file, or ``"example"`` for example data. + Data columns should be labeled as: "subjID", "iter", "cond", "prob", "choice", "RT", "feedback". + niter + Number of iterations, including warm-up. Defaults to 4000. + nwarmup + Number of iterations used for warm-up only. Defaults to 1000. + + ``nwarmup`` is a numerical value that specifies how many MCMC samples + should not be stored upon the beginning of each chain. For those + familiar with Bayesian methods, this is equivalent to burn-in samples. + Due to the nature of the MCMC algorithm, initial values (i.e., where the + sampling chains begin) can have a heavy influence on the generated + posterior distributions. The ``nwarmup`` argument can be set to a + higher number in order to curb the effects that initial values have on + the resulting posteriors. + nchain + Number of Markov chains to run. 
Defaults to 4. + + ``nchain`` is a numerical value that specifies how many chains (i.e., + independent sampling sequences) should be used to draw samples from + the posterior distribution. Since the posteriors are generated from a + sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the + sampling is complete, it is possible to check the multiple chains for + convergence by running the following line of code: + + .. code:: python + + output.plot(type='trace') + ncore + Number of CPUs to be used for running. Defaults to 1. + nthin + Every ``nthin``-th sample will be used to generate the posterior + distribution. Defaults to 1. A higher number can be used when + auto-correlation within the MCMC sampling is high. + + ``nthin`` is a numerical value that specifies the "skipping" behavior + of the MCMC sampler. That is, only every ``nthin``-th sample is used to + generate posterior distributions. By default, ``nthin`` is equal to 1, + meaning that every sample is used to generate the posterior. + inits + String or list specifying how the initial values should be generated. + Options are ``'fixed'`` or ``'random'``, or your own initial values. + ind_pars + String specifying how to summarize the individual parameters. + Current options are: ``'mean'``, ``'median'``, or ``'mode'``. + model_regressor + Whether to export model-based regressors. For this model they are: "Q1", "Q2". + vb + Whether to use variational inference to approximately draw from a + posterior distribution. Defaults to ``False``. + inc_postpred + Include trial-level posterior predictive simulations in + model output (may greatly increase file size). Defaults to ``False``. + adapt_delta + Floating point value representing the target acceptance probability of a new + sample in the MCMC chain. Must be between 0 and 1. See note below. 
+ stepsize + Integer value specifying the size of each leapfrog step that the MCMC sampler + can take on each new iteration. See note below. + max_treedepth + Integer value specifying how many leapfrog steps the MCMC sampler can take + on each new iteration. See note below. + **additional_args + For this model, it's possible to set the following model-specific argument to a value that you may prefer. + + - ``RTbound``: Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds). + - ``initQ``: Floating point value representing the model's initial value of any choice. Defaults to 0.5. + + Returns + ------- + model_data + An ``hbayesdm.TaskModel`` instance with the following components: + + - ``model``: String value that is the name of the model ('pstRT_rlddm6'). + - ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values + (as specified by ``ind_pars``) for each subject. + - ``par_vals``: OrderedDict holding the posterior samples over different parameters. + - ``fit``: A PyStan StanFit object that contains the fitted Stan model. + - ``raw_data``: Pandas DataFrame containing the raw data used to fit the model, + as specified by the user. + - ``model_regressor``: Dict holding the extracted model-based regressors. + + Examples + -------- + + .. 
code:: python + + from hbayesdm import rhat, print_fit + from hbayesdm.models import pstRT_rlddm6 + + # Run the model and store results in "output" + output = pstRT_rlddm6(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4) + + # Visually check convergence of the sampling chains (should look like "hairy caterpillars") + output.plot(type='trace') + + # Plot posterior distributions of the hyper-parameters (distributions should be unimodal) + output.plot() + + # Check Rhat values (all Rhat values should be less than or equal to 1.1) + rhat(output, less=1.1) + + # Show the LOOIC and WAIC model fit estimates + print_fit(output) + """ + return PstrtRlddm6( + data=data, + niter=niter, + nwarmup=nwarmup, + nchain=nchain, + ncore=ncore, + nthin=nthin, + inits=inits, + ind_pars=ind_pars, + model_regressor=model_regressor, + vb=vb, + inc_postpred=inc_postpred, + adapt_delta=adapt_delta, + stepsize=stepsize, + max_treedepth=max_treedepth, + **additional_args) diff --git a/Python/hbayesdm/preprocess_funcs.py b/Python/hbayesdm/preprocess_funcs.py index 081c82c9..3b218424 100644 --- a/Python/hbayesdm/preprocess_funcs.py +++ b/Python/hbayesdm/preprocess_funcs.py @@ -715,7 +715,72 @@ def pst_preprocess_func(self, raw_data, general_info, additional_args): # Returned data_dict will directly be passed to pystan return data_dict + + +def pstRT_preprocess_func(self, raw_data, general_info, additional_args): + subj_group = iter(general_info['grouped_data']) + + # Use general_info(s) about raw_data + n_subj = general_info['n_subj'] + t_subjs = general_info['t_subjs'] + t_max = general_info['t_max'] + + # Initialize (model-specific) data arrays + i_subjs = np.full((n_subj, t_max), -1, dtype=int) + cond = np.full((n_subj, t_max), -1, dtype=int) + choice = np.full((n_subj, t_max), -1, dtype=int) + RT = np.full((n_subj, t_max), -1, dtype=float) + fd = np.full((n_subj, t_max), -1, dtype=int) + + # Write from subj_data to the data arrays + for s in range(n_subj): + _, subj_data = 
next(subj_group) + t = t_subjs[s] + i_subjs[s][:t] = subj_data['iter'] + cond[s][:t] = subj_data['cond'] + choice[s][:t] = subj_data['choice'] + RT[s][:t] = subj_data['rt'] + fd[s][:t] = subj_data['feedback'] + + # Task conditions and reward probabilities + df_prob = raw_data[['cond', 'prob']].drop_duplicates() + df_prob = df_prob.sort_values(by=['cond']) + n_cond = df_prob.shape[0] + prob = df_prob['prob'].to_numpy() + + # Minimum reaction time + minRT = np.full(n_subj, -1, dtype=float) + + # Write minRT + subj_group = iter(general_info['grouped_data']) + for s in range(n_subj): + _, subj_data = next(subj_group) + minRT[s] = min(subj_data['rt']) + # Use additional_args if provided + RTbound = additional_args.get('RTbound', 0.1) + initQ = additional_args.get('initQ', 0.5) + + # Wrap into a dict for pystan + data_dict = { + 'N': n_subj, + 'T': t_max, + 'Tsubj': t_subjs, + 'Isubj': i_subjs, + 'n_cond': n_cond, + 'cond': cond, + 'choice': choice, + 'RT': RT, + 'fd': fd, + 'initQ': initQ, + 'minRT': minRT, + 'RTbound': RTbound, + 'prob': prob + } + + # Returned data_dict will directly be passed to pystan + return data_dict + def ra_preprocess_func(self, raw_data, general_info, additional_args): # Iterate through grouped_data diff --git a/Python/tests/test_pstRT_ddm.py b/Python/tests/test_pstRT_ddm.py new file mode 100644 index 00000000..1d3389af --- /dev/null +++ b/Python/tests/test_pstRT_ddm.py @@ -0,0 +1,12 @@ +import pytest + +from hbayesdm.models import pstRT_ddm + + +def test_pstRT_ddm(): + _ = pstRT_ddm( + data="example", niter=10, nwarmup=5, nchain=1, ncore=1) + + +if __name__ == '__main__': + pytest.main() diff --git a/Python/tests/test_pstRT_rlddm1.py b/Python/tests/test_pstRT_rlddm1.py new file mode 100644 index 00000000..7129dd49 --- /dev/null +++ b/Python/tests/test_pstRT_rlddm1.py @@ -0,0 +1,12 @@ +import pytest + +from hbayesdm.models import pstRT_rlddm1 + + +def test_pstRT_rlddm1(): + _ = pstRT_rlddm1( + data="example", niter=10, nwarmup=5, nchain=1, 
ncore=1) + + +if __name__ == '__main__': + pytest.main() diff --git a/Python/tests/test_pstRT_rlddm6.py b/Python/tests/test_pstRT_rlddm6.py new file mode 100644 index 00000000..71335a33 --- /dev/null +++ b/Python/tests/test_pstRT_rlddm6.py @@ -0,0 +1,12 @@ +import pytest + +from hbayesdm.models import pstRT_rlddm6 + + +def test_pstRT_rlddm6(): + _ = pstRT_rlddm6( + data="example", niter=10, nwarmup=5, nchain=1, ncore=1) + + +if __name__ == '__main__': + pytest.main() diff --git a/R/DESCRIPTION b/R/DESCRIPTION index ebe607d6..ffea48aa 100644 --- a/R/DESCRIPTION +++ b/R/DESCRIPTION @@ -44,7 +44,7 @@ BugReports: https://github.com/CCS-Lab/hBayesDM/issues License: GPL-3 NeedsCompilation: yes Encoding: UTF-8 -RoxygenNote: 7.1.1 +RoxygenNote: 7.1.2 SystemRequirements: GNU make Collate: 'HDIofMCMC.R' @@ -109,6 +109,9 @@ Collate: 'prl_fictitious_woa.R' 'prl_rp.R' 'prl_rp_multipleB.R' + 'pstRT_ddm.R' + 'pstRT_rlddm1.R' + 'pstRT_rlddm6.R' 'pst_Q.R' 'pst_gainloss_Q.R' 'ra_noLA.R' diff --git a/R/NAMESPACE b/R/NAMESPACE index e7cb914c..112480b2 100644 --- a/R/NAMESPACE +++ b/R/NAMESPACE @@ -55,6 +55,9 @@ export(prl_fictitious_rp_woa) export(prl_fictitious_woa) export(prl_rp) export(prl_rp_multipleB) +export(pstRT_ddm) +export(pstRT_rlddm1) +export(pstRT_rlddm6) export(pst_Q) export(pst_gainloss_Q) export(ra_noLA) diff --git a/R/R/preprocess_funcs.R b/R/R/preprocess_funcs.R index e1f955b5..83911f6e 100644 --- a/R/R/preprocess_funcs.R +++ b/R/R/preprocess_funcs.R @@ -710,6 +710,61 @@ pst_preprocess_func <- function(raw_data, general_info) { return(data_list) } +# Make a function +pstRT_preprocess_func <- function(raw_data, general_info, RTbound = 0.1, initQ = 0.5) { + # Use raw_data as a data.frame + raw_data <- as.data.frame(raw_data) + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Information for each trial + i_subjs <- array(-1, c(n_subj, t_max)) + cond <- 
array(-1, c(n_subj, t_max)) + choice <- array(-1, c(n_subj, t_max)) + RT <- array(-1, c(n_subj, t_max)) + fd <- array(-1, c(n_subj, t_max)) + for (i in 1:n_subj) { + subj <- subjs[i] + subj_data <- subset(raw_data, raw_data$subjid == subj) + + i_subjs[i, 1:t_subjs[i]] <- subj_data$iter + cond[i, 1:t_subjs[i]] <- subj_data$cond + choice[i, 1:t_subjs[i]] <- subj_data$choice + RT[i, 1:t_subjs[i]] <- subj_data$rt + fd[i, 1:t_subjs[i]] <- subj_data$feedback + } + + # Task conditions and reward probabilities + df_prob <- unique(raw_data[, c('cond', 'prob')]) + df_prob <- df_prob[order(df_prob$cond), ] + n_cond <- nrow(df_prob) + prob <- df_prob$prob + + # Minimum reaction time + minRT <- with(raw_data, aggregate(rt, by = list(y = subjid), FUN = min)[["x"]]) + + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + Isubj = i_subjs, + n_cond = n_cond, + cond = cond, + choice = choice, + RT = RT, + fd = fd, + initQ = initQ, + minRT = minRT, + RTbound = RTbound, + prob = prob + ) +} + ra_preprocess_func <- function(raw_data, general_info) { # Currently class(raw_data) == "data.table" diff --git a/R/R/pstRT_ddm.R b/R/R/pstRT_ddm.R new file mode 100644 index 00000000..1f427186 --- /dev/null +++ b/R/R/pstRT_ddm.R @@ -0,0 +1,51 @@ +#' @templateVar MODEL_FUNCTION pstRT_ddm +#' @templateVar CONTRIBUTOR +#' @templateVar TASK_NAME Probabilistic Selection Task (with RT data) +#' @templateVar TASK_CODE pstRT +#' @templateVar TASK_CITE (Frank et al., 2007; Frank et al., 2004) +#' @templateVar MODEL_NAME Drift Diffusion Model +#' @templateVar MODEL_CODE ddm +#' @templateVar MODEL_CITE (Pedersen et al., 2017) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "cond", "choice", "RT" +#' @templateVar PARAMETERS \code{a} (boundary separation), \code{tau} (non-decision time), \code{d1} (drift rate scaling), \code{d2} (drift rate scaling), \code{d3} (drift rate scaling) +#' @templateVar REGRESSORS +#' @templateVar 
POSTPREDS "choice_os", "RT_os" +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{subjID}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{cond}{Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3).} +#' @templateVar DETAILS_DATA_3 \item{choice}{Integer value representing the option chosen on the given trial (1 or 2).} +#' @templateVar DETAILS_DATA_4 \item{RT}{Float value representing the time taken for the response on the given trial.} +#' @templateVar LENGTH_ADDITIONAL_ARGS 1 +#' @templateVar ADDITIONAL_ARGS_1 \item{RTbound}{Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} +#' +#' @template model-documentation +#' +#' @export +#' @include hBayesDM_model.R +#' @include preprocess_funcs.R + +#' @references +#' Frank, M. J., Santamaria, A., O'Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599. +#' +#' Frank, M. J., Seeberger, L. C., & O'reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943. +#' +#' Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. 
+#' + + +pstRT_ddm <- hBayesDM_model( + task_name = "pstRT", + model_name = "ddm", + model_type = "", + data_columns = c("subjID", "cond", "choice", "RT"), + parameters = list( + "a" = c(0, 1.8, Inf), + "tau" = c(0, 0.3, Inf), + "d1" = c(-Inf, 0.8, Inf), + "d2" = c(-Inf, 0.4, Inf), + "d3" = c(-Inf, 0.3, Inf) + ), + regressors = NULL, + postpreds = c("choice_os", "RT_os"), + preprocess_func = pstRT_preprocess_func) diff --git a/R/R/pstRT_rlddm1.R b/R/R/pstRT_rlddm1.R new file mode 100644 index 00000000..0f3d3948 --- /dev/null +++ b/R/R/pstRT_rlddm1.R @@ -0,0 +1,56 @@ +#' @templateVar MODEL_FUNCTION pstRT_rlddm1 +#' @templateVar CONTRIBUTOR +#' @templateVar TASK_NAME Probabilistic Selection Task (with RT data) +#' @templateVar TASK_CODE pstRT +#' @templateVar TASK_CITE (Frank et al., 2007; Frank et al., 2004) +#' @templateVar MODEL_NAME Reinforcement Learning Drift Diffusion Model 1 +#' @templateVar MODEL_CODE rlddm1 +#' @templateVar MODEL_CITE (Pedersen et al., 2017) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "cond", "prob", "choice", "RT", "feedback" +#' @templateVar PARAMETERS \code{a} (boundary separation), \code{tau} (non-decision time), \code{v} (drift rate scaling), \code{alpha} (learning rate) +#' @templateVar REGRESSORS "Q1", "Q2" +#' @templateVar POSTPREDS "choice_os", "RT_os", "choice_sm", "RT_sm", "fd_sm" +#' @templateVar LENGTH_DATA_COLUMNS 6 +#' @templateVar DETAILS_DATA_1 \item{subjID}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{cond}{Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3).} +#' @templateVar DETAILS_DATA_3 \item{prob}{Float value representing the probability that a correct response (1) is rewarded in the current task condition.} +#' @templateVar DETAILS_DATA_4 \item{choice}{Integer value representing the option chosen on the given trial (1 or 2).} +#' @templateVar DETAILS_DATA_5 \item{RT}{Float value representing 
the time taken for the response on the given trial.} +#' @templateVar DETAILS_DATA_6 \item{feedback}{Integer value representing the outcome of the given trial (where 'correct' == 1, and 'incorrect' == 0).} +#' @templateVar LENGTH_ADDITIONAL_ARGS 2 +#' @templateVar ADDITIONAL_ARGS_1 \item{RTbound}{Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} +#' @templateVar ADDITIONAL_ARGS_2 \item{initQ}{Floating point value representing the model's initial value of any choice. Defaults to 0.5.} +#' +#' @template model-documentation +#' +#' @export +#' @include hBayesDM_model.R +#' @include preprocess_funcs.R + +#' @references +#' Frank, M. J., Santamaria, A., O'Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599. +#' +#' Frank, M. J., Seeberger, L. C., & O'reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943. +#' +#' Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. 
+#' + + +pstRT_rlddm1 <- hBayesDM_model( + task_name = "pstRT", + model_name = "rlddm1", + model_type = "", + data_columns = c("subjID", "cond", "prob", "choice", "RT", "feedback"), + parameters = list( + "a" = c(0, 1.8, Inf), + "tau" = c(0, 0.3, Inf), + "v" = c(-Inf, 4.5, Inf), + "alpha" = c(0, 0.02, 1) + ), + regressors = list( + "Q1" = 2, + "Q2" = 2 + ), + postpreds = c("choice_os", "RT_os", "choice_sm", "RT_sm", "fd_sm"), + preprocess_func = pstRT_preprocess_func) diff --git a/R/R/pstRT_rlddm6.R b/R/R/pstRT_rlddm6.R new file mode 100644 index 00000000..54d4ce48 --- /dev/null +++ b/R/R/pstRT_rlddm6.R @@ -0,0 +1,59 @@ +#' @templateVar MODEL_FUNCTION pstRT_rlddm6 +#' @templateVar CONTRIBUTOR +#' @templateVar TASK_NAME Probabilistic Selection Task (with RT data) +#' @templateVar TASK_CODE pstRT +#' @templateVar TASK_CITE (Frank et al., 2007; Frank et al., 2004) +#' @templateVar MODEL_NAME Reinforcement Learning Drift Diffusion Model 6 +#' @templateVar MODEL_CODE rlddm6 +#' @templateVar MODEL_CITE (Pedersen et al., 2017) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "iter", "cond", "prob", "choice", "RT", "feedback" +#' @templateVar PARAMETERS \code{a} (boundary separation), \code{bp} (boundary separation power), \code{tau} (non-decision time), \code{v} (drift rate scaling), \code{alpha_pos} (learning rate for positive prediction error), \code{alpha_neg} (learning rate for negative prediction error) +#' @templateVar REGRESSORS "Q1", "Q2" +#' @templateVar POSTPREDS "choice_os", "RT_os", "choice_sm", "RT_sm", "fd_sm" +#' @templateVar LENGTH_DATA_COLUMNS 7 +#' @templateVar DETAILS_DATA_1 \item{subjID}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{iter}{Integer value representing the trial number for each task condition.} +#' @templateVar DETAILS_DATA_3 \item{cond}{Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3).} +#' @templateVar DETAILS_DATA_4 
\item{prob}{Float value representing the probability that a correct response (1) is rewarded in the current task condition.} +#' @templateVar DETAILS_DATA_5 \item{choice}{Integer value representing the option chosen on the given trial (1 or 2).} +#' @templateVar DETAILS_DATA_6 \item{RT}{Float value representing the time taken for the response on the given trial.} +#' @templateVar DETAILS_DATA_7 \item{feedback}{Integer value representing the outcome of the given trial (where 'correct' == 1, and 'incorrect' == 0).} +#' @templateVar LENGTH_ADDITIONAL_ARGS 2 +#' @templateVar ADDITIONAL_ARGS_1 \item{RTbound}{Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} +#' @templateVar ADDITIONAL_ARGS_2 \item{initQ}{Floating point value representing the model's initial value of any choice. Defaults to 0.5.} +#' +#' @template model-documentation +#' +#' @export +#' @include hBayesDM_model.R +#' @include preprocess_funcs.R + +#' @references +#' Frank, M. J., Santamaria, A., O'Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599. +#' +#' Frank, M. J., Seeberger, L. C., & O'reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943. +#' +#' Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. 
+#' + + +pstRT_rlddm6 <- hBayesDM_model( + task_name = "pstRT", + model_name = "rlddm6", + model_type = "", + data_columns = c("subjID", "iter", "cond", "prob", "choice", "RT", "feedback"), + parameters = list( + "a" = c(0, 1.6, Inf), + "bp" = c(-0.3, 0.02, 0.3), + "tau" = c(0, 0.2, Inf), + "v" = c(-Inf, 2.8, Inf), + "alpha_pos" = c(0, 0.04, 1), + "alpha_neg" = c(0, 0.02, 1) + ), + regressors = list( + "Q1" = 2, + "Q2" = 2 + ), + postpreds = c("choice_os", "RT_os", "choice_sm", "RT_sm", "fd_sm"), + preprocess_func = pstRT_preprocess_func) diff --git a/R/inst/plotting/plot_functions.R b/R/inst/plotting/plot_functions.R index 379656d3..ff841c77 100644 --- a/R/inst/plotting/plot_functions.R +++ b/R/inst/plotting/plot_functions.R @@ -414,6 +414,39 @@ plot_pst_gainloss_Q <- function(obj, fontSize = 10, ncols = 3, binSize = 30) { return(h_all) } +plot_pstRT_ddm <- function(obj, fontSize = 10, ncols = 5, binSize = 30) { + pars = obj$parVals + h1 = plotDist(sample = pars$mu_a, fontSize = fontSize, binSize = binSize, xLab = expression(paste(a, " (Boundary Separation)"))) + h2 = plotDist(sample = pars$mu_tau, fontSize = fontSize, binSize = binSize, xLab = expression(paste(tau, " (Non-Decision Time)"))) + h3 = plotDist(sample = pars$mu_d1, fontSize = fontSize, binSize = binSize, xLab = expression(paste(d1, " (Drift Rate 1)"))) + h4 = plotDist(sample = pars$mu_d2, fontSize = fontSize, binSize = binSize, xLab = expression(paste(d2, " (Drift Rate 2)"))) + h5 = plotDist(sample = pars$mu_d3, fontSize = fontSize, binSize = binSize, xLab = expression(paste(d3, " (Drift Rate 3)"))) + h_all = multiplot(h1, h2, h3, h4, h5, cols = ncols) + return(h_all) +} + +plot_pstRT_rlddm1 <- function(obj, fontSize = 10, ncols = 4, binSize = 30) { + pars = obj$parVals + h1 = plotDist(sample = pars$mu_a, fontSize = fontSize, binSize = binSize, xLab = expression(paste(a, " (Boundary Separation)"))) + h2 = plotDist(sample = pars$mu_tau, fontSize = fontSize, binSize = binSize, xLab = 
expression(paste(tau, " (Non-Decision Time)"))) + h3 = plotDist(sample = pars$mu_v, fontSize = fontSize, binSize = binSize, xLab = expression(paste(v, " (Drift Rate Scaling)"))) + h4 = plotDist(sample = pars$mu_alpha, fontSize = fontSize, binSize = binSize, xLab = expression(paste(alpha, " (Learning Rate)"))) + h_all = multiplot(h1, h2, h3, h4, cols = ncols) + return(h_all) +} + +plot_pstRT_rlddm6 <- function(obj, fontSize = 10, ncols = 3, binSize = 30) { + pars = obj$parVals + h1 = plotDist(sample = pars$mu_a, fontSize = fontSize, binSize = binSize, xLab = expression(paste(a, " (Boundary Baseline)"))) + h2 = plotDist(sample = pars$mu_bp, fontSize = fontSize, binSize = binSize, xLab = expression(paste(bp, " (Boundary Power)"))) + h3 = plotDist(sample = pars$mu_tau, fontSize = fontSize, binSize = binSize, xLab = expression(paste(tau, " (Non-Decision Time)"))) + h4 = plotDist(sample = pars$mu_v, fontSize = fontSize, binSize = binSize, xLab = expression(paste(v, " (Drift Rate Scaling)"))) + h5 = plotDist(sample = pars$mu_alpha_pos, fontSize = fontSize, binSize = binSize, xLab = expression(paste(alpha[pos], " (+Learning Rate)"))) + h6 = plotDist(sample = pars$mu_alpha_neg, fontSize = fontSize, binSize = binSize, xLab = expression(paste(alpha[neg], " (-Learning Rate)"))) + h_all = multiplot(h1, h2, h3, h4, h5, h6, cols = ncols) + return(h_all) +} + plot_bandit4arm2_kalman_filter <- function(obj, fontSize = 10, ncols = 6, binSize = 30) { pars = obj$parVals h1 = plotDist(sample = pars$mu_lambda, fontSize = fontSize, binSize = binSize, xLab = expression(paste(lambda, " (Decay Factor)"))) diff --git a/R/man/alt_delta.Rd b/R/man/alt_delta.Rd index 25c07332..56b93330 100644 --- a/R/man/alt_delta.Rd +++ b/R/man/alt_delta.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. 
Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Aversive Learning Task, there should be 5 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://github.com/lilihub}{Lili Zhang} <\email{lili.zhang27@mail.dcu.ie}>} } \examples{ diff --git a/R/man/alt_gamma.Rd b/R/man/alt_gamma.Rd index 54ebaa16..9ea29760 100644 --- a/R/man/alt_gamma.Rd +++ b/R/man/alt_gamma.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. 
If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Aversive Learning Task, there should be 5 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. 
- \subsection{Contributors}{\href{https://github.com/lilihub}{Lili Zhang} <\email{lili.zhang27@mail.dcu.ie}>} } \examples{ diff --git a/R/man/bandit2arm_delta.Rd b/R/man/bandit2arm_delta.Rd index e77ebeee..4a8a89b2 100644 --- a/R/man/bandit2arm_delta.Rd +++ b/R/man/bandit2arm_delta.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the 2-Armed Bandit Task, there should be 3 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/bandit4arm2_kalman_filter.Rd b/R/man/bandit4arm2_kalman_filter.Rd index dead139a..618e4012 100644 --- a/R/man/bandit4arm2_kalman_filter.Rd +++ b/R/man/bandit4arm2_kalman_filter.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the 4-Armed Bandit Task (modified), there should be 3 columns of data with t using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/yoonseo-zoh/}{Yoonseo Zoh} <\email{zohyos7@gmail.com}>} } \examples{ diff --git a/R/man/bandit4arm_2par_lapse.Rd b/R/man/bandit4arm_2par_lapse.Rd index ffb1a3ee..dda6f115 100644 --- a/R/man/bandit4arm_2par_lapse.Rd +++ b/R/man/bandit4arm_2par_lapse.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the 4-Armed Bandit Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. 
Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/bandit4arm_4par.Rd b/R/man/bandit4arm_4par.Rd index 4834b9f8..8da6dbf5 100644 --- a/R/man/bandit4arm_4par.Rd +++ b/R/man/bandit4arm_4par.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the 4-Armed Bandit Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. 
It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/bandit4arm_lapse.Rd b/R/man/bandit4arm_lapse.Rd index 936d1d6b..7dd2a2b6 100644 --- a/R/man/bandit4arm_lapse.Rd +++ b/R/man/bandit4arm_lapse.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the 4-Armed Bandit Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
- -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/bandit4arm_lapse_decay.Rd b/R/man/bandit4arm_lapse_decay.Rd index 276f9f59..b69e5df5 100644 --- a/R/man/bandit4arm_lapse_decay.Rd +++ b/R/man/bandit4arm_lapse_decay.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the 4-Armed Bandit Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. 
By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/bandit4arm_singleA_lapse.Rd b/R/man/bandit4arm_singleA_lapse.Rd index 7dca88a7..bbb4a2cb 100644 --- a/R/man/bandit4arm_singleA_lapse.Rd +++ b/R/man/bandit4arm_singleA_lapse.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. 
See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the 4-Armed Bandit Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/banditNarm_2par_lapse.Rd b/R/man/banditNarm_2par_lapse.Rd index a52af4f5..f6c3d465 100644 --- a/R/man/banditNarm_2par_lapse.Rd +++ b/R/man/banditNarm_2par_lapse.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. 
See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{Narm}{Number of arms used in Multi-armed Bandit Task If not given, the number of unique choice will be used.} @@ -148,16 +139,6 @@ For the N-Armed Bandit Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://github.com/cheoljun95}{Cheol Jun Cho} <\email{cjfwndnsl@gmail.com}>} } \examples{ diff --git a/R/man/banditNarm_4par.Rd b/R/man/banditNarm_4par.Rd index 07c69ca8..8776076b 100644 --- a/R/man/banditNarm_4par.Rd +++ b/R/man/banditNarm_4par.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. 
Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{Narm}{Number of arms used in Multi-armed Bandit Task If not given, the number of unique choice will be used.} @@ -148,16 +139,6 @@ For the N-Armed Bandit Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://github.com/cheoljun95}{Cheol Jun Cho} <\email{cjfwndnsl@gmail.com}>} } \examples{ diff --git a/R/man/banditNarm_delta.Rd b/R/man/banditNarm_delta.Rd index cb0b78aa..0d59635e 100644 --- a/R/man/banditNarm_delta.Rd +++ b/R/man/banditNarm_delta.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). 
Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{Narm}{Number of arms used in Multi-armed Bandit Task If not given, the number of unique choice will be used.} @@ -148,16 +139,6 @@ For the N-Armed Bandit Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. 
- \subsection{Contributors}{\href{https://github.com/cheoljun95}{Cheol Jun Cho} <\email{cjfwndnsl@gmail.com}>} } \examples{ diff --git a/R/man/banditNarm_kalman_filter.Rd b/R/man/banditNarm_kalman_filter.Rd index f531634a..bd50be1e 100644 --- a/R/man/banditNarm_kalman_filter.Rd +++ b/R/man/banditNarm_kalman_filter.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{Narm}{Number of arms used in Multi-armed Bandit Task If not given, the number of unique choice will be used.} @@ -148,16 +139,6 @@ For the N-Armed Bandit Task (modified), there should be 4 columns of data with t using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/yoonseo-zoh/}{Yoonseo Zoh} <\email{zohyos7@gmail.com}>, \href{https://github.com/cheoljun95}{Cheol Jun Cho} <\email{cjfwndnsl@gmail.com}>} } \examples{ diff --git a/R/man/banditNarm_lapse.Rd b/R/man/banditNarm_lapse.Rd index 7e052b79..39b496c2 100644 --- a/R/man/banditNarm_lapse.Rd +++ b/R/man/banditNarm_lapse.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{Narm}{Number of arms used in Multi-armed Bandit Task If not given, the number of unique choice will be used.} @@ -148,16 +139,6 @@ For the N-Armed Bandit Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. 
It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://github.com/cheoljun95}{Cheol Jun Cho} <\email{cjfwndnsl@gmail.com}>} } \examples{ diff --git a/R/man/banditNarm_lapse_decay.Rd b/R/man/banditNarm_lapse_decay.Rd index ef503ac1..9af70850 100644 --- a/R/man/banditNarm_lapse_decay.Rd +++ b/R/man/banditNarm_lapse_decay.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{Narm}{Number of arms used in Multi-armed Bandit Task If not given, the number of unique choice will be used.} @@ -148,16 +139,6 @@ For the N-Armed Bandit Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. 
By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://github.com/cheoljun95}{Cheol Jun Cho} <\email{cjfwndnsl@gmail.com}>} } \examples{ diff --git a/R/man/banditNarm_singleA_lapse.Rd b/R/man/banditNarm_singleA_lapse.Rd index 4bec86cf..fd121625 100644 --- a/R/man/banditNarm_singleA_lapse.Rd +++ b/R/man/banditNarm_singleA_lapse.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. 
See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{Narm}{Number of arms used in Multi-armed Bandit Task If not given, the number of unique choice will be used.} @@ -148,16 +139,6 @@ For the N-Armed Bandit Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://github.com/cheoljun95}{Cheol Jun Cho} <\email{cjfwndnsl@gmail.com}>} } \examples{ diff --git a/R/man/bart_ewmv.Rd b/R/man/bart_ewmv.Rd index ed6d7042..84c490dd 100644 --- a/R/man/bart_ewmv.Rd +++ b/R/man/bart_ewmv.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. 
See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Balloon Analogue Risk Task, there should be 3 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} <\email{hrpark12@gmail.com}>, \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} <\email{jaeyeong.yang1125@gmail.com}>} } \examples{ diff --git a/R/man/bart_par4.Rd b/R/man/bart_par4.Rd index 1336420f..eee85410 100644 --- a/R/man/bart_par4.Rd +++ b/R/man/bart_par4.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Balloon Analogue Risk Task, there should be 3 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. 
- \subsection{Contributors}{\href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} <\email{hrpark12@gmail.com}>, \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} <\email{jaeyeong.yang1125@gmail.com}>, \href{https://ccs-lab.github.io/team/ayoung-lee/}{Ayoung Lee} <\email{aylee2008@naver.com}>, \href{https://ccs-lab.github.io/team/jeongbin-oh/}{Jeongbin Oh} <\email{ows0104@gmail.com}>, \href{https://ccs-lab.github.io/team/jiyoon-lee/}{Jiyoon Lee} <\email{nicole.lee2001@gmail.com}>, \href{https://ccs-lab.github.io/team/junha-jang/}{Junha Jang} <\email{andy627robo@naver.com}>} } \examples{ diff --git a/R/man/cgt_cm.Rd b/R/man/cgt_cm.Rd index ce4fab08..adf6b94e 100644 --- a/R/man/cgt_cm.Rd +++ b/R/man/cgt_cm.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. Not available for this model.} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Cambridge Gambling Task, there should be 7 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. 
Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/nate-haines/}{Nathaniel Haines} <\email{haines.175@osu.edu}>} } \examples{ diff --git a/R/man/choiceRT_ddm.Rd b/R/man/choiceRT_ddm.Rd index b0ac0670..932c19ab 100644 --- a/R/man/choiceRT_ddm.Rd +++ b/R/man/choiceRT_ddm.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. Not available for this model.} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{RTbound}{Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} @@ -147,16 +138,6 @@ For the Choice Reaction Time Task, there should be 3 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
- -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/choiceRT_ddm_single.Rd b/R/man/choiceRT_ddm_single.Rd index 51e72d4e..8dcb332d 100644 --- a/R/man/choiceRT_ddm_single.Rd +++ b/R/man/choiceRT_ddm_single.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. Not available for this model.} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{RTbound}{Floating point value representing the lower bound (i.e., minimum allowed) reaction time. 
Defaults to 0.1 (100 milliseconds).} @@ -147,16 +138,6 @@ For the Choice Reaction Time Task, there should be 3 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/cra_exp.Rd b/R/man/cra_exp.Rd index 5db87ca7..f2dc0df8 100644 --- a/R/man/cra_exp.Rd +++ b/R/man/cra_exp.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. 
See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Choice Under Risk and Ambiguity Task, there should be 6 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} <\email{jaeyeong.yang1125@gmail.com}>} } \examples{ diff --git a/R/man/cra_linear.Rd b/R/man/cra_linear.Rd index 2b7a160a..13bcfbea 100644 --- a/R/man/cra_linear.Rd +++ b/R/man/cra_linear.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. 
See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Choice Under Risk and Ambiguity Task, there should be 6 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} <\email{jaeyeong.yang1125@gmail.com}>} } \examples{ diff --git a/R/man/dbdm_prob_weight.Rd b/R/man/dbdm_prob_weight.Rd index da1edc7e..dad238ac 100644 --- a/R/man/dbdm_prob_weight.Rd +++ b/R/man/dbdm_prob_weight.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Description Based Decison Making Task, there should be 8 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/yoonseo-zoh/}{Yoonseo Zoh} <\email{zohyos7@gmail.com}>} } \examples{ diff --git a/R/man/dd_cs.Rd b/R/man/dd_cs.Rd index 2eddfb21..a501f83f 100644 --- a/R/man/dd_cs.Rd +++ b/R/man/dd_cs.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. 
Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Delay Discounting Task, there should be 6 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/dd_cs_single.Rd b/R/man/dd_cs_single.Rd index 6123f6c0..bb86b535 100644 --- a/R/man/dd_cs_single.Rd +++ b/R/man/dd_cs_single.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. 
If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Delay Discounting Task, there should be 6 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/dd_exp.Rd b/R/man/dd_exp.Rd index 3215d4ea..91d010ae 100644 --- a/R/man/dd_exp.Rd +++ b/R/man/dd_exp.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. 
If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Delay Discounting Task, there should be 6 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/dd_hyperbolic.Rd b/R/man/dd_hyperbolic.Rd index a8756e0a..71549718 100644 --- a/R/man/dd_hyperbolic.Rd +++ b/R/man/dd_hyperbolic.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). 
Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Delay Discounting Task, there should be 6 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. 
} \examples{ \dontrun{ diff --git a/R/man/dd_hyperbolic_single.Rd b/R/man/dd_hyperbolic_single.Rd index 7a0927c0..ad67210d 100644 --- a/R/man/dd_hyperbolic_single.Rd +++ b/R/man/dd_hyperbolic_single.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Delay Discounting Task, there should be 6 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/gng_m1.Rd b/R/man/gng_m1.Rd index d4132e36..93dc7d0d 100644 --- a/R/man/gng_m1.Rd +++ b/R/man/gng_m1.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Orthogonalized Go/Nogo Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/gng_m2.Rd b/R/man/gng_m2.Rd index 6f45a3da..8579e8d2 100644 --- a/R/man/gng_m2.Rd +++ b/R/man/gng_m2.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Orthogonalized Go/Nogo Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/gng_m3.Rd b/R/man/gng_m3.Rd index e2f124c1..f293fc8b 100644 --- a/R/man/gng_m3.Rd +++ b/R/man/gng_m3.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Orthogonalized Go/Nogo Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/gng_m4.Rd b/R/man/gng_m4.Rd index 820e0a2b..eabcca30 100644 --- a/R/man/gng_m4.Rd +++ b/R/man/gng_m4.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Orthogonalized Go/Nogo Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/igt_orl.Rd b/R/man/igt_orl.Rd index 34ab2bcf..51fabad4 100644 --- a/R/man/igt_orl.Rd +++ b/R/man/igt_orl.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{payscale}{Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100.} @@ -148,16 +139,6 @@ For the Iowa Gambling Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. 
One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/nate-haines/}{Nate Haines} <\email{haines.175@osu.edu}>} } \examples{ diff --git a/R/man/igt_pvl_decay.Rd b/R/man/igt_pvl_decay.Rd index 85df0d9d..85cbcccc 100644 --- a/R/man/igt_pvl_decay.Rd +++ b/R/man/igt_pvl_decay.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{payscale}{Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100.} @@ -147,16 +138,6 @@ For the Iowa Gambling Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. 
It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/igt_pvl_delta.Rd b/R/man/igt_pvl_delta.Rd index 62d58d4f..539ba3c5 100644 --- a/R/man/igt_pvl_delta.Rd +++ b/R/man/igt_pvl_delta.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{payscale}{Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100.} @@ -147,16 +138,6 @@ For the Iowa Gambling Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
- -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/igt_vpp.Rd b/R/man/igt_vpp.Rd index 59a34153..14b6df45 100644 --- a/R/man/igt_vpp.Rd +++ b/R/man/igt_vpp.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{payscale}{Raw payoffs within data are divided by this number. Used for scaling data. 
Defaults to 100.} @@ -147,16 +138,6 @@ For the Iowa Gambling Task, there should be 4 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/peer_ocu.Rd b/R/man/peer_ocu.Rd index 17828ef0..0daa3172 100644 --- a/R/man/peer_ocu.Rd +++ b/R/man/peer_ocu.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. 
See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Peer Influence Task, there should be 8 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/prl_ewa.Rd b/R/man/prl_ewa.Rd index 5c8f26b9..d9569cb7 100644 --- a/R/man/prl_ewa.Rd +++ b/R/man/prl_ewa.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. 
See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Reversal Learning Task, there should be 3 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang (for model-based regressors)} <\email{jaeyeong.yang1125@gmail.com}>, \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park (for model-based regressors)} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/prl_fictitious.Rd b/R/man/prl_fictitious.Rd index 565bce36..2d07e5e4 100644 --- a/R/man/prl_fictitious.Rd +++ b/R/man/prl_fictitious.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. 
If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Reversal Learning Task, there should be 3 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. 
- \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang (for model-based regressors)} <\email{jaeyeong.yang1125@gmail.com}>, \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park (for model-based regressors)} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/prl_fictitious_multipleB.Rd b/R/man/prl_fictitious_multipleB.Rd index 33694353..33680c45 100644 --- a/R/man/prl_fictitious_multipleB.Rd +++ b/R/man/prl_fictitious_multipleB.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Reversal Learning Task, there should be 4 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. 
One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang (for model-based regressors)} <\email{jaeyeong.yang1125@gmail.com}>, \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park (for model-based regressors)} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/prl_fictitious_rp.Rd b/R/man/prl_fictitious_rp.Rd index 580ccd4e..05380a7b 100644 --- a/R/man/prl_fictitious_rp.Rd +++ b/R/man/prl_fictitious_rp.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Reversal Learning Task, there should be 3 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. 
It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang (for model-based regressors)} <\email{jaeyeong.yang1125@gmail.com}>, \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park (for model-based regressors)} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/prl_fictitious_rp_woa.Rd b/R/man/prl_fictitious_rp_woa.Rd index 23e25c4c..d59ae414 100644 --- a/R/man/prl_fictitious_rp_woa.Rd +++ b/R/man/prl_fictitious_rp_woa.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Reversal Learning Task, there should be 3 columns of data using only every \code{i == nthin} samples to generate posterior distributions. 
By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang (for model-based regressors)} <\email{jaeyeong.yang1125@gmail.com}>, \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park (for model-based regressors)} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/prl_fictitious_woa.Rd b/R/man/prl_fictitious_woa.Rd index 91409353..4b858b12 100644 --- a/R/man/prl_fictitious_woa.Rd +++ b/R/man/prl_fictitious_woa.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. 
See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Reversal Learning Task, there should be 3 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang (for model-based regressors)} <\email{jaeyeong.yang1125@gmail.com}>, \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park (for model-based regressors)} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/prl_rp.Rd b/R/man/prl_rp.Rd index d8fecdc6..7b43116c 100644 --- a/R/man/prl_rp.Rd +++ b/R/man/prl_rp.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Reversal Learning Task, there should be 3 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang (for model-based regressors)} <\email{jaeyeong.yang1125@gmail.com}>, \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park (for model-based regressors)} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/prl_rp_multipleB.Rd b/R/man/prl_rp_multipleB.Rd index 2fadd9c6..7bd582ff 100644 --- a/R/man/prl_rp_multipleB.Rd +++ b/R/man/prl_rp_multipleB.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). 
Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Reversal Learning Task, there should be 4 columns of data using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. 
- \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang (for model-based regressors)} <\email{jaeyeong.yang1125@gmail.com}>, \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park (for model-based regressors)} <\email{hrpark12@gmail.com}>} } \examples{ \dontrun{ diff --git a/R/man/pstRT_ddm.Rd b/R/man/pstRT_ddm.Rd new file mode 100644 index 00000000..259ec0c9 --- /dev/null +++ b/R/man/pstRT_ddm.Rd @@ -0,0 +1,175 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pstRT_ddm.R +\name{pstRT_ddm} +\alias{pstRT_ddm} +\title{Drift Diffusion Model} +\usage{ +pstRT_ddm( + data = NULL, + niter = 4000, + nwarmup = 1000, + nchain = 4, + ncore = 1, + nthin = 1, + inits = "vb", + indPars = "mean", + modelRegressor = FALSE, + vb = FALSE, + inc_postpred = FALSE, + adapt_delta = 0.95, + stepsize = 1, + max_treedepth = 10, + ... +) +} +\arguments{ +\item{data}{Data to be modeled. It should be given as a data.frame object, +a filepath for a tab-separated txt file, \code{"example"} to use example data, or +\code{"choose"} to choose data with an interactive window. +Columns in the dataset must include: +"subjID", "cond", "choice", "RT". See \bold{Details} below for more information.} + +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} + +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} + +\item{nchain}{Number of Markov chains to run. Defaults to 4.} + +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} + +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} + +\item{inits}{Character value specifying how the initial values should be generated. 
+Possible options are "vb" (default), "fixed", "random", or your own initial values.} + +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} + +\item{modelRegressor}{Whether to export model-based regressors (\code{TRUE} or \code{FALSE}). +Not available for this model.} + +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to \code{FALSE}.} + +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to \code{FALSE}. +If set to \code{TRUE}, it includes: "choice_os", "RT_os"} + +\item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: +\describe{ + \item{RTbound}{Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} + + + + + + + + +}} +} +\value{ +A class "hBayesDM" object \code{modelData} with the following components: +\describe{ + \item{model}{Character value that is the name of the model (\\code{"pstRT_ddm"}).} + \item{allIndPars}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{parVals}{List object containing the posterior samples over different parameters.} + \item{fit}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{rawdata}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{modelRegressor}{List object containing the extracted model-based regressors.} +} +} +\description{ +Hierarchical Bayesian Modeling of the Probabilistic Selection Task (with RT data) using Drift Diffusion Model. +It has the following parameters: \code{a} (boundary separation), \code{tau} (non-decision time), \code{d1} (drift rate scaling), \code{d2} (drift rate scaling), \code{d3} (drift rate scaling). 
+ +\itemize{ + \item \strong{Task}: Probabilistic Selection Task (with RT data) (Frank et al., 2007; Frank et al., 2004) + \item \strong{Model}: Drift Diffusion Model (Pedersen et al., 2017) +} +} +\details{ +This section describes some of the function arguments in greater detail. + +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Selection Task (with RT data), there should be 4 columns of data with the + labels "subjID", "cond", "choice", "RT". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: +\describe{ + \item{subjID}{A unique identifier for each subject in the data-set.} + \item{cond}{Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3).} + \item{choice}{Integer value representing the option chosen on the given trial (1 or 2).} + \item{RT}{Float value representing the time taken for the response on the given trial.} + + + + + +} +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. 
where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. +} +\examples{ +\dontrun{ +# Run the model with a given data.frame as df +output <- pstRT_ddm( + data = df, niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) + +# Run the model with example data +output <- pstRT_ddm( + data = "example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) + +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") + +# Check Rhat values (all Rhat values should be less than or equal to 1.1) +rhat(output) + +# Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) +plot(output) + +# Show the WAIC and LOOIC model fit estimates +printFit(output) +} +} +\references{ +Frank, M. J., Santamaria, A., O'Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. 
Neuropsychopharmacology, 32(7), 1583-1599. + +Frank, M. J., Seeberger, L. C., & O'reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943. + +Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. +} +\seealso{ +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} +} diff --git a/R/man/pstRT_rlddm1.Rd b/R/man/pstRT_rlddm1.Rd new file mode 100644 index 00000000..735623e6 --- /dev/null +++ b/R/man/pstRT_rlddm1.Rd @@ -0,0 +1,175 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pstRT_rlddm1.R +\name{pstRT_rlddm1} +\alias{pstRT_rlddm1} +\title{Reinforcement Learning Drift Diffusion Model 1} +\usage{ +pstRT_rlddm1( + data = NULL, + niter = 4000, + nwarmup = 1000, + nchain = 4, + ncore = 1, + nthin = 1, + inits = "vb", + indPars = "mean", + modelRegressor = FALSE, + vb = FALSE, + inc_postpred = FALSE, + adapt_delta = 0.95, + stepsize = 1, + max_treedepth = 10, + ... +) +} +\arguments{ +\item{data}{Data to be modeled. It should be given as a data.frame object, +a filepath for a tab-separated txt file, \code{"example"} to use example data, or +\code{"choose"} to choose data with an interactive window. +Columns in the dataset must include: +"subjID", "cond", "prob", "choice", "RT", "feedback". See \bold{Details} below for more information.} + +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} + +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} + +\item{nchain}{Number of Markov chains to run. Defaults to 4.} + +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} + +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is +high.} + +\item{inits}{Character value specifying how the initial values should be generated. +Possible options are "vb" (default), "fixed", "random", or your own initial values.} + +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} + +\item{modelRegressor}{Whether to export model-based regressors (\code{TRUE} or \code{FALSE}). +For this model they are: "Q1", "Q2".} + +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to \code{FALSE}.} + +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to \code{FALSE}. +If set to \code{TRUE}, it includes: "choice_os", "RT_os", "choice_sm", "RT_sm", "fd_sm"} + +\item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: +\describe{ + \item{RTbound}{Floating point value representing the lower bound (i.e., minimum allowed) reaction time. 
Defaults to 0.1 (100 milliseconds).} + \item{initQ}{Floating point value representing the model's initial value of any choice.} + + + + + + + +}} +} +\value{ +A class "hBayesDM" object \code{modelData} with the following components: +\describe{ + \item{model}{Character value that is the name of the model (\\code{"pstRT_rlddm1"}).} + \item{allIndPars}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{parVals}{List object containing the posterior samples over different parameters.} + \item{fit}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{rawdata}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{modelRegressor}{List object containing the extracted model-based regressors.} +} +} +\description{ +Hierarchical Bayesian Modeling of the Probabilistic Selection Task (with RT data) using Reinforcement Learning Drift Diffusion Model 1. +It has the following parameters: \code{a} (boundary separation), \code{tau} (non-decision time), \code{v} (drift rate scaling), \code{alpha} (learning rate). + +\itemize{ + \item \strong{Task}: Probabilistic Selection Task (with RT data) (Frank et al., 2007; Frank et al., 2004) + \item \strong{Model}: Reinforcement Learning Drift Diffusion Model 1 (Pedersen et al., 2017) +} +} +\details{ +This section describes some of the function arguments in greater detail. + +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. 
The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Selection Task (with RT data), there should be 6 columns of data with the + labels "subjID", "cond", "prob", "choice", "RT", "feedback". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: +\describe{ + \item{subjID}{A unique identifier for each subject in the data-set.} + \item{cond}{Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3).} + \item{prob}{Float value representing the probability that a correct response (1) is rewarded in the current task condition.} + \item{choice}{Integer value representing the option chosen on the given trial (1 or 2).} + \item{RT}{Float value representing the time taken for the response on the given trial.} + \item{feedback}{Integer value representing the outcome of the given trial (where 'correct' == 1, and 'incorrect' == 0).} + + + +} +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. 
+ +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. +} +\examples{ +\dontrun{ +# Run the model with a given data.frame as df +output <- pstRT_rlddm1( + data = df, niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) + +# Run the model with example data +output <- pstRT_rlddm1( + data = "example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) + +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") + +# Check Rhat values (all Rhat values should be less than or equal to 1.1) +rhat(output) + +# Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) +plot(output) + +# Show the WAIC and LOOIC model fit estimates +printFit(output) +} +} +\references{ +Frank, M. J., Santamaria, A., O'Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599. + +Frank, M. J., Seeberger, L. C., & O'reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943. + +Pedersen, M. L., Frank, M. 
J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. +} +\seealso{ +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} +} diff --git a/R/man/pstRT_rlddm6.Rd b/R/man/pstRT_rlddm6.Rd new file mode 100644 index 00000000..e4b31d03 --- /dev/null +++ b/R/man/pstRT_rlddm6.Rd @@ -0,0 +1,175 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pstRT_rlddm6.R +\name{pstRT_rlddm6} +\alias{pstRT_rlddm6} +\title{Reinforcement Learning Drift Diffusion Model 6} +\usage{ +pstRT_rlddm6( + data = NULL, + niter = 4000, + nwarmup = 1000, + nchain = 4, + ncore = 1, + nthin = 1, + inits = "vb", + indPars = "mean", + modelRegressor = FALSE, + vb = FALSE, + inc_postpred = FALSE, + adapt_delta = 0.95, + stepsize = 1, + max_treedepth = 10, + ... +) +} +\arguments{ +\item{data}{Data to be modeled. It should be given as a data.frame object, +a filepath for a tab-separated txt file, \code{"example"} to use example data, or +\code{"choose"} to choose data with an interactive window. +Columns in the dataset must include: +"subjID", "iter", "cond", "prob", "choice", "RT", "feedback". See \bold{Details} below for more information.} + +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} + +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} + +\item{nchain}{Number of Markov chains to run. Defaults to 4.} + +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} + +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} + +\item{inits}{Character value specifying how the initial values should be generated. 
+Possible options are "vb" (default), "fixed", "random", or your own initial values.} + +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} + +\item{modelRegressor}{Whether to export model-based regressors (\code{TRUE} or \code{FALSE}). +For this model they are: "Q1", "Q2".} + +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to \code{FALSE}.} + +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to \code{FALSE}. +If set to \code{TRUE}, it includes: "choice_os", "RT_os", "choice_sm", "RT_sm", "fd_sm"} + +\item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: +\describe{ + \item{RTbound}{Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} + \item{initQ}{Floating point value representing the model's initial value of any choice.} + + + + + + + +}} +} +\value{ +A class "hBayesDM" object \code{modelData} with the following components: +\describe{ + \item{model}{Character value that is the name of the model (\code{"pstRT_rlddm6"}).} + \item{allIndPars}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{parVals}{List object containing the posterior samples over different parameters.} + \item{fit}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{rawdata}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{modelRegressor}{List object containing the extracted model-based regressors.} +} +} +\description{ +Hierarchical Bayesian Modeling of the Probabilistic Selection Task (with RT data) using Reinforcement Learning Drift Diffusion Model 6. 
+It has the following parameters: \code{a} (boundary separation), \code{bp} (boundary separation power), \code{tau} (non-decision time), \code{v} (drift rate scaling), \code{alpha_pos} (learning rate for positive prediction error), \code{alpha_neg} (learning rate for negative prediction error). + +\itemize{ + \item \strong{Task}: Probabilistic Selection Task (with RT data) (Frank et al., 2007; Frank et al., 2004) + \item \strong{Model}: Reinforcement Learning Drift Diffusion Model 6 (Pedersen et al., 2017) +} +} +\details{ +This section describes some of the function arguments in greater detail. + +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Selection Task (with RT data), there should be 7 columns of data with the + labels "subjID", "iter", "cond", "prob", "choice", "RT", "feedback". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: +\describe{ + \item{subjID}{A unique identifier for each subject in the data-set.} + \item{iter}{Integer value representing the trial number for each task condition.} + \item{cond}{Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3).} + \item{prob}{Float value representing the probability that a correct response (1) is rewarded in the current task condition.} + \item{choice}{Integer value representing the option chosen on the given trial (1 or 2).} + \item{RT}{Float value representing the time taken for the response on the given trial.} + \item{feedback}{Integer value representing the outcome of the given trial (where 'correct' == 1, and 'incorrect' == 0).} + + +} +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. 
Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. +} +\examples{ +\dontrun{ +# Run the model with a given data.frame as df +output <- pstRT_rlddm6( + data = df, niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) + +# Run the model with example data +output <- pstRT_rlddm6( + data = "example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) + +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") + +# Check Rhat values (all Rhat values should be less than or equal to 1.1) +rhat(output) + +# Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) +plot(output) + +# Show the WAIC and LOOIC model fit estimates +printFit(output) +} +} +\references{ +Frank, M. J., Santamaria, A., O'Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599. + +Frank, M. J., Seeberger, L. C., & O'reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943. + +Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. 
+} +\seealso{ +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} +} diff --git a/R/man/pst_Q.Rd b/R/man/pst_Q.Rd index 70c1d14e..3f8168a2 100644 --- a/R/man/pst_Q.Rd +++ b/R/man/pst_Q.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Selection Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://www.unige.ch/fapse/e3lab/members1/phd-candidates/david-munoz-tord}{David Munoz Tord} <\email{david.munoztord@unige.ch}>} } \examples{ diff --git a/R/man/pst_gainloss_Q.Rd b/R/man/pst_gainloss_Q.Rd index b5c06f2f..1a997c61 100644 --- a/R/man/pst_gainloss_Q.Rd +++ b/R/man/pst_gainloss_Q.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Probabilistic Selection Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. 
Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} <\email{jaeyeong.yang1125@gmail.com}>} } \examples{ diff --git a/R/man/ra_noLA.Rd b/R/man/ra_noLA.Rd index a6ea8975..a25a73de 100644 --- a/R/man/ra_noLA.Rd +++ b/R/man/ra_noLA.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Risk Aversion Task, there should be 5 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. 
It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/ra_noRA.Rd b/R/man/ra_noRA.Rd index 39200881..737d7afa 100644 --- a/R/man/ra_noRA.Rd +++ b/R/man/ra_noRA.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Risk Aversion Task, there should be 5 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. 
It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/ra_prospect.Rd b/R/man/ra_prospect.Rd index 0b637729..ddd04c5c 100644 --- a/R/man/ra_prospect.Rd +++ b/R/man/ra_prospect.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Risk Aversion Task, there should be 5 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
- -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/rdt_happiness.Rd b/R/man/rdt_happiness.Rd index 01a0780b..85dc4dd4 100644 --- a/R/man/rdt_happiness.Rd +++ b/R/man/rdt_happiness.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Risky Decision Task, there should be 9 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
-\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/task2AFC_sdt.Rd b/R/man/task2AFC_sdt.Rd index c5c3b413..74af22a5 100644 --- a/R/man/task2AFC_sdt.Rd +++ b/R/man/task2AFC_sdt.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the 2-alternative forced choice task, there should be 3 columns of data with using only every \code{i == nthin} samples to generate posterior distributions. 
By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://heesunpark26.github.io/}{Heesun Park} <\email{heesunpark26@gmail.com}>} } \examples{ diff --git a/R/man/ts_par4.Rd b/R/man/ts_par4.Rd index baeb36e8..ad93a78c 100644 --- a/R/man/ts_par4.Rd +++ b/R/man/ts_par4.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred_step1", "y_pred_step2"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{trans_prob}{Common state transition probability from Stage (Level) 1 to Stage (Level) 2. 
Defaults to 0.7.} @@ -148,16 +139,6 @@ For the Two-Step Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/ts_par6.Rd b/R/man/ts_par6.Rd index 69e4fcd2..4a631030 100644 --- a/R/man/ts_par6.Rd +++ b/R/man/ts_par6.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred_step1", "y_pred_step2"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. 
See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{trans_prob}{Common state transition probability from Stage (Level) 1 to Stage (Level) 2. Defaults to 0.7.} @@ -148,16 +139,6 @@ For the Two-Step Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/ts_par7.Rd b/R/man/ts_par7.Rd index 025073c4..68159f94 100644 --- a/R/man/ts_par7.Rd +++ b/R/man/ts_par7.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred_step1", "y_pred_step2"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, it's possible to set \strong{model-specific argument(s)} as follows: \describe{ \item{trans_prob}{Common state transition probability from Stage (Level) 1 to Stage (Level) 2. Defaults to 0.7.} @@ -148,16 +139,6 @@ For the Two-Step Task, there should be 4 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. - \subsection{Contributors}{\href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} <\email{hrpark12@gmail.com}>} } \examples{ diff --git a/R/man/ug_bayes.Rd b/R/man/ug_bayes.Rd index 081dadfa..8d21cf64 100644 --- a/R/man/ug_bayes.Rd +++ b/R/man/ug_bayes.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). Defaults to \code{FALSE}. 
If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Norm-Training Ultimatum Game, there should be 3 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/ug_delta.Rd b/R/man/ug_delta.Rd index 0d69b720..cc42cd2a 100644 --- a/R/man/ug_delta.Rd +++ b/R/man/ug_delta.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). 
Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -136,16 +127,6 @@ For the Norm-Training Ultimatum Game, there should be 3 columns of data with the \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. - -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. } \examples{ \dontrun{ diff --git a/R/man/wcs_sql.Rd b/R/man/wcs_sql.Rd index ecec2cc1..589b8397 100644 --- a/R/man/wcs_sql.Rd +++ b/R/man/wcs_sql.Rd @@ -57,15 +57,6 @@ to \code{FALSE}.} size). 
Defaults to \code{FALSE}. If set to \code{TRUE}, it includes: "y_pred"} -\item{adapt_delta}{Floating point value representing the target acceptance probability of a new -sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} - -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can -take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take -on each new iteration. See \bold{Details} below.} - \item{...}{For this model, there is no model-specific argument.} } \value{ @@ -137,16 +128,6 @@ For the Wisconsin Card Sorting Task, there should be 3 columns of data with the using only every \code{i == nthin} samples to generate posterior distributions. By default, \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. -\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are - advanced options that give the user more control over Stan's MCMC sampler. It is recommended - that only advanced users change the default values, as alterations can profoundly change the - sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in - Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for - more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC - Algorithm Parameters' of the \href{https://mc-stan.org/users/documentation/}{Stan User's Guide - and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical - description of these arguments. 
- \subsection{Contributors}{\href{https://ccs-lab.github.io/team/dayeong-min/}{Dayeong Min} <\email{mindy2801@snu.ac.kr}>} } \examples{ diff --git a/R/tests/testthat/test_pstRT_ddm.R b/R/tests/testthat/test_pstRT_ddm.R new file mode 100644 index 00000000..60ca3cdd --- /dev/null +++ b/R/tests/testthat/test_pstRT_ddm.R @@ -0,0 +1,10 @@ +context("Test pstRT_ddm") +library(hBayesDM) + +test_that("Test pstRT_ddm", { + # Do not run this test on CRAN + skip_on_cran() + + expect_output(pstRT_ddm( + data = "example", niter = 10, nwarmup = 5, nchain = 1, ncore = 1)) +}) diff --git a/R/tests/testthat/test_pstRT_rlddm1.R b/R/tests/testthat/test_pstRT_rlddm1.R new file mode 100644 index 00000000..ad8232ae --- /dev/null +++ b/R/tests/testthat/test_pstRT_rlddm1.R @@ -0,0 +1,10 @@ +context("Test pstRT_rlddm1") +library(hBayesDM) + +test_that("Test pstRT_rlddm1", { + # Do not run this test on CRAN + skip_on_cran() + + expect_output(pstRT_rlddm1( + data = "example", niter = 10, nwarmup = 5, nchain = 1, ncore = 1)) +}) diff --git a/R/tests/testthat/test_pstRT_rlddm6.R b/R/tests/testthat/test_pstRT_rlddm6.R new file mode 100644 index 00000000..64d939ce --- /dev/null +++ b/R/tests/testthat/test_pstRT_rlddm6.R @@ -0,0 +1,10 @@ +context("Test pstRT_rlddm6") +library(hBayesDM) + +test_that("Test pstRT_rlddm6", { + # Do not run this test on CRAN + skip_on_cran() + + expect_output(pstRT_rlddm6( + data = "example", niter = 10, nwarmup = 5, nchain = 1, ncore = 1)) +}) diff --git a/commons/extdata/pstRT_exampleData.txt b/commons/extdata/pstRT_exampleData.txt new file mode 100644 index 00000000..fb904e12 --- /dev/null +++ b/commons/extdata/pstRT_exampleData.txt @@ -0,0 +1,1201 @@ +"subjID" "iter" "cond" "prob" "choice" "RT" "feedback" +1 1 1 0.8 2 2.85235597206356 0 +1 1 2 0.7 2 1.10755597206356 0 +1 1 3 0.6 2 2.54175597206356 0 +1 2 1 0.8 1 2.02315597206356 1 +1 2 2 0.7 1 2.76635597206356 0 +1 2 3 0.6 2 3.54735597206356 0 +1 3 1 0.8 1 2.39215597206357 0 +1 3 2 0.7 2 2.96075597206357 1 +1 3 3 
0.6 1 1.67435597206356 1 +1 4 1 0.8 1 4.93275597206356 0 +1 4 2 0.7 1 1.23655597206356 1 +1 4 3 0.6 1 2.73755597206356 1 +1 5 1 0.8 1 1.89415597206356 1 +1 5 2 0.7 2 1.45875597206356 0 +1 5 3 0.6 1 3.09135597206356 0 +1 6 1 0.8 1 1.80085597206356 1 +1 6 2 0.7 2 2.28005597206356 0 +1 6 3 0.6 1 1.94425597206356 0 +1 7 1 0.8 1 2.78585597206356 1 +1 7 2 0.7 1 4.69305597206356 1 +1 7 3 0.6 2 4.13185597206356 0 +1 8 1 0.8 1 2.04365597206356 1 +1 8 2 0.7 1 1.05245597206356 1 +1 8 3 0.6 2 3.19985597206356 0 +1 9 1 0.8 1 1.90735597206356 0 +1 9 2 0.7 1 1.07635597206356 1 +1 9 3 0.6 1 1.15395597206356 1 +1 10 1 0.8 1 1.45555597206356 1 +1 10 2 0.7 1 2.46975597206356 1 +1 10 3 0.6 1 1.12535597206356 1 +1 11 1 0.8 1 1.63785597206356 0 +1 11 2 0.7 1 3.64205597206356 1 +1 11 3 0.6 1 2.55305597206356 0 +1 12 1 0.8 1 3.62255597206356 1 +1 12 2 0.7 1 0.465955972063565 1 +1 12 3 0.6 1 2.39595597206357 1 +1 13 1 0.8 1 2.16115597206356 0 +1 13 2 0.7 1 2.71795597206356 0 +1 13 3 0.6 1 0.962755972063565 1 +1 14 1 0.8 1 0.954455972063565 1 +1 14 2 0.7 1 1.89805597206356 1 +1 14 3 0.6 2 1.06345597206356 0 +1 15 1 0.8 1 0.795155972063565 1 +1 15 2 0.7 1 1.27995597206356 1 +1 15 3 0.6 1 1.00315597206356 1 +1 16 1 0.8 1 1.16175597206356 1 +1 16 2 0.7 1 2.28275597206356 1 +1 16 3 0.6 1 1.84555597206356 1 +1 17 1 0.8 1 1.57825597206356 1 +1 17 2 0.7 1 0.535655972063565 1 +1 17 3 0.6 1 1.10985597206356 0 +1 18 1 0.8 1 0.895155972063565 1 +1 18 2 0.7 1 0.493555972063565 0 +1 18 3 0.6 1 0.592755972063565 1 +1 19 1 0.8 1 0.787655972063565 1 +1 19 2 0.7 1 0.646055972063565 1 +1 19 3 0.6 1 2.37945597206356 1 +1 20 1 0.8 1 0.875855972063565 1 +1 20 2 0.7 1 1.59665597206356 1 +1 20 3 0.6 1 0.684855972063565 1 +1 21 1 0.8 1 1.25795597206356 1 +1 21 2 0.7 2 0.760755972063565 0 +1 21 3 0.6 1 0.833755972063565 1 +1 22 1 0.8 1 1.04085597206356 0 +1 22 2 0.7 1 2.03845597206356 1 +1 22 3 0.6 1 1.45245597206356 1 +1 23 1 0.8 1 0.935055972063565 1 +1 23 2 0.7 1 1.02785597206356 1 +1 23 3 0.6 1 
0.754855972063565 1 +1 24 1 0.8 1 0.948155972063565 0 +1 24 2 0.7 1 0.809555972063565 1 +1 24 3 0.6 1 1.47615597206356 1 +1 25 1 0.8 1 0.604555972063565 1 +1 25 2 0.7 1 1.05635597206356 0 +1 25 3 0.6 1 1.31235597206356 1 +1 26 1 0.8 1 0.997255972063565 1 +1 26 2 0.7 1 1.12785597206356 1 +1 26 3 0.6 1 2.38725597206356 0 +1 27 1 0.8 1 1.18665597206356 1 +1 27 2 0.7 1 1.55005597206356 1 +1 27 3 0.6 1 1.29345597206356 1 +1 28 1 0.8 1 0.924555972063565 1 +1 28 2 0.7 1 1.05575597206356 0 +1 28 3 0.6 1 1.13835597206356 0 +1 29 1 0.8 1 1.17395597206356 1 +1 29 2 0.7 1 0.697755972063565 0 +1 29 3 0.6 1 0.766355972063565 0 +1 30 1 0.8 1 0.842055972063565 0 +1 30 2 0.7 1 4.21565597206356 0 +1 30 3 0.6 1 1.28245597206356 1 +1 31 1 0.8 1 0.685655972063565 0 +1 31 2 0.7 1 1.26045597206356 1 +1 31 3 0.6 1 0.859055972063565 1 +1 32 1 0.8 1 0.503755972063565 1 +1 32 2 0.7 1 1.26135597206356 1 +1 32 3 0.6 1 0.733555972063565 1 +1 33 1 0.8 1 1.04435597206356 0 +1 33 2 0.7 1 0.975555972063565 0 +1 33 3 0.6 1 0.607155972063565 0 +1 34 1 0.8 1 0.885155972063565 1 +1 34 2 0.7 1 1.67075597206356 1 +1 34 3 0.6 1 0.757355972063565 1 +1 35 1 0.8 1 0.970055972063565 1 +1 35 2 0.7 1 1.05245597206356 1 +1 35 3 0.6 1 2.91365597206356 1 +1 36 1 0.8 1 1.25205597206356 1 +1 36 2 0.7 2 0.917455972063565 0 +1 36 3 0.6 1 0.986455972063565 1 +1 37 1 0.8 1 0.749655972063565 1 +1 37 2 0.7 1 1.00565597206356 0 +1 37 3 0.6 1 0.920455972063565 0 +1 38 1 0.8 1 1.48475597206356 1 +1 38 2 0.7 1 0.519955972063565 0 +1 38 3 0.6 1 0.869755972063565 0 +1 39 1 0.8 1 0.648155972063565 0 +1 39 2 0.7 1 1.11155597206356 1 +1 39 3 0.6 1 0.770355972063565 1 +1 40 1 0.8 1 0.552155972063565 1 +1 40 2 0.7 1 0.724555972063565 1 +1 40 3 0.6 1 0.921155972063565 1 +2 1 1 0.8 2 4.70084867525102 0 +2 1 2 0.7 2 1.69744867525102 0 +2 1 3 0.6 1 2.19584867525102 0 +2 2 1 0.8 1 2.66434867525102 1 +2 2 2 0.7 1 1.11194867525102 1 +2 2 3 0.6 1 1.87874867525102 0 +2 3 1 0.8 1 0.746148675251019 1 +2 3 2 0.7 2 1.09034867525102 0 +2 3 3 0.6 
2 1.21674867525102 0 +2 4 1 0.8 1 2.75444867525102 1 +2 4 2 0.7 1 0.947448675251019 0 +2 4 3 0.6 1 1.26184867525102 0 +2 5 1 0.8 1 0.748148675251019 0 +2 5 2 0.7 2 1.13654867525102 1 +2 5 3 0.6 2 0.959548675251019 1 +2 6 1 0.8 1 1.40884867525102 1 +2 6 2 0.7 2 0.987448675251019 0 +2 6 3 0.6 2 1.55624867525102 0 +2 7 1 0.8 1 0.969948675251019 1 +2 7 2 0.7 2 1.41214867525102 0 +2 7 3 0.6 2 1.89914867525102 1 +2 8 1 0.8 1 0.653648675251019 1 +2 8 2 0.7 2 0.789848675251019 1 +2 8 3 0.6 2 1.50584867525102 0 +2 9 1 0.8 1 0.736848675251019 1 +2 9 2 0.7 2 1.08624867525102 1 +2 9 3 0.6 2 2.09244867525102 1 +2 10 1 0.8 1 0.529948675251019 1 +2 10 2 0.7 2 0.853948675251019 0 +2 10 3 0.6 2 0.812148675251019 0 +2 11 1 0.8 1 0.980548675251019 1 +2 11 2 0.7 2 0.687748675251019 1 +2 11 3 0.6 2 0.602148675251019 0 +2 12 1 0.8 1 1.20834867525102 1 +2 12 2 0.7 2 0.978148675251019 0 +2 12 3 0.6 2 0.824948675251019 1 +2 13 1 0.8 1 0.746648675251019 1 +2 13 2 0.7 2 0.785048675251019 0 +2 13 3 0.6 2 0.597048675251019 0 +2 14 1 0.8 1 1.16464867525102 1 +2 14 2 0.7 2 0.691048675251019 0 +2 14 3 0.6 2 0.516048675251019 0 +2 15 1 0.8 1 0.715848675251019 1 +2 15 2 0.7 2 0.785448675251019 1 +2 15 3 0.6 2 1.53684867525102 0 +2 16 1 0.8 1 0.515548675251019 0 +2 16 2 0.7 2 0.564148675251019 0 +2 16 3 0.6 2 1.01214867525102 0 +2 17 1 0.8 1 1.19714867525102 0 +2 17 2 0.7 2 2.48314867525102 0 +2 17 3 0.6 2 1.00674867525102 1 +2 18 1 0.8 1 1.19474867525102 1 +2 18 2 0.7 2 2.09114867525102 1 +2 18 3 0.6 1 0.747748675251019 0 +2 19 1 0.8 1 0.858948675251019 1 +2 19 2 0.7 2 1.99074867525102 1 +2 19 3 0.6 2 0.631148675251019 0 +2 20 1 0.8 1 0.643048675251019 1 +2 20 2 0.7 1 1.27264867525102 0 +2 20 3 0.6 2 3.96644867525102 1 +2 21 1 0.8 1 0.737048675251019 1 +2 21 2 0.7 2 0.896248675251019 0 +2 21 3 0.6 2 1.90924867525102 0 +2 22 1 0.8 1 0.610648675251019 1 +2 22 2 0.7 2 1.78704867525102 0 +2 22 3 0.6 2 0.563248675251019 0 +2 23 1 0.8 1 0.733648675251019 1 +2 23 2 0.7 2 0.577448675251019 1 +2 23 3 0.6 2 
0.911648675251019 0 +2 24 1 0.8 1 0.674648675251019 1 +2 24 2 0.7 2 0.702048675251019 0 +2 24 3 0.6 2 0.588248675251019 0 +2 25 1 0.8 1 0.493048675251019 1 +2 25 2 0.7 2 1.75624867525102 0 +2 25 3 0.6 2 1.07944867525102 1 +2 26 1 0.8 1 0.611548675251019 1 +2 26 2 0.7 2 0.643748675251019 0 +2 26 3 0.6 2 0.566748675251019 0 +2 27 1 0.8 1 0.608748675251019 0 +2 27 2 0.7 2 0.824148675251019 0 +2 27 3 0.6 2 0.686948675251019 1 +2 28 1 0.8 1 0.550948675251019 1 +2 28 2 0.7 2 1.33714867525102 0 +2 28 3 0.6 2 0.491548675251019 0 +2 29 1 0.8 1 0.849348675251019 1 +2 29 2 0.7 2 2.50414867525102 1 +2 29 3 0.6 2 0.490348675251019 1 +2 30 1 0.8 1 0.487348675251019 1 +2 30 2 0.7 2 0.599948675251019 0 +2 30 3 0.6 2 0.847548675251019 1 +2 31 1 0.8 1 0.797548675251019 0 +2 31 2 0.7 2 0.882948675251019 1 +2 31 3 0.6 2 0.759948675251019 1 +2 32 1 0.8 1 0.841848675251019 1 +2 32 2 0.7 2 0.567848675251019 0 +2 32 3 0.6 2 1.21684867525102 1 +2 33 1 0.8 1 1.43724867525102 1 +2 33 2 0.7 2 1.56664867525102 0 +2 33 3 0.6 2 0.737048675251019 0 +2 34 1 0.8 1 0.673048675251019 0 +2 34 2 0.7 2 0.634448675251019 0 +2 34 3 0.6 2 0.500248675251019 0 +2 35 1 0.8 1 0.903448675251019 1 +2 35 2 0.7 2 1.18224867525102 0 +2 35 3 0.6 2 0.898448675251019 1 +2 36 1 0.8 1 0.631248675251019 1 +2 36 2 0.7 2 0.554248675251019 0 +2 36 3 0.6 2 0.778448675251019 0 +2 37 1 0.8 1 0.839048675251019 1 +2 37 2 0.7 2 1.20444867525102 0 +2 37 3 0.6 2 0.872248675251019 0 +2 38 1 0.8 1 0.636848675251019 1 +2 38 2 0.7 2 1.74224867525102 0 +2 38 3 0.6 2 1.63344867525102 0 +2 39 1 0.8 1 1.02284867525102 1 +2 39 2 0.7 2 0.951048675251019 0 +2 39 3 0.6 2 0.551048675251019 0 +2 40 1 0.8 1 0.578448675251019 0 +2 40 2 0.7 2 0.714848675251019 1 +2 40 3 0.6 2 0.948648675251019 0 +3 1 1 0.8 1 1.34250086176272 1 +3 1 2 0.7 1 1.34350086176272 1 +3 1 3 0.6 2 2.01210086176272 1 +3 2 1 0.8 1 0.904100861762718 0 +3 2 2 0.7 1 1.71290086176272 0 +3 2 3 0.6 1 0.689700861762718 0 +3 3 1 0.8 1 2.02540086176272 0 +3 3 2 0.7 2 2.02460086176272 0 
+3 3 3 0.6 1 3.18180086176272 1 +3 4 1 0.8 1 4.13220086176272 1 +3 4 2 0.7 1 1.62740086176272 1 +3 4 3 0.6 1 0.756800861762718 0 +3 5 1 0.8 1 1.72600086176272 1 +3 5 2 0.7 2 0.961800861762718 0 +3 5 3 0.6 2 1.28460086176272 0 +3 6 1 0.8 1 2.53340086176272 1 +3 6 2 0.7 1 1.06180086176272 1 +3 6 3 0.6 2 0.614800861762718 0 +3 7 1 0.8 1 2.16780086176272 1 +3 7 2 0.7 1 2.85440086176272 1 +3 7 3 0.6 1 0.758400861762718 1 +3 8 1 0.8 1 0.888000861762718 1 +3 8 2 0.7 1 1.16380086176272 0 +3 8 3 0.6 1 0.921800861762718 1 +3 9 1 0.8 1 0.744600861762718 1 +3 9 2 0.7 1 1.18600086176272 1 +3 9 3 0.6 2 1.08720086176272 0 +3 10 1 0.8 1 1.14140086176272 1 +3 10 2 0.7 1 0.588200861762718 1 +3 10 3 0.6 2 1.41460086176272 0 +3 11 1 0.8 1 1.99420086176272 1 +3 11 2 0.7 1 0.577400861762718 1 +3 11 3 0.6 1 2.22680086176272 1 +3 12 1 0.8 1 0.710000861762718 1 +3 12 2 0.7 1 0.615400861762718 0 +3 12 3 0.6 2 1.62040086176272 0 +3 13 1 0.8 1 0.564200861762718 1 +3 13 2 0.7 1 0.891200861762718 0 +3 13 3 0.6 2 1.57560086176272 1 +3 14 1 0.8 1 1.10960086176272 0 +3 14 2 0.7 1 0.632800861762718 0 +3 14 3 0.6 1 0.840200861762718 0 +3 15 1 0.8 1 0.753200861762718 1 +3 15 2 0.7 1 0.996400861762718 1 +3 15 3 0.6 1 1.76880086176272 1 +3 16 1 0.8 1 0.565000861762718 1 +3 16 2 0.7 1 0.777400861762718 1 +3 16 3 0.6 1 1.37760086176272 1 +3 17 1 0.8 1 0.609400861762718 1 +3 17 2 0.7 1 0.774600861762718 1 +3 17 3 0.6 1 1.22600086176272 1 +3 18 1 0.8 1 0.616000861762718 1 +3 18 2 0.7 1 0.716200861762718 1 +3 18 3 0.6 2 0.658200861762718 0 +3 19 1 0.8 1 0.610000861762718 1 +3 19 2 0.7 1 0.900000861762718 0 +3 19 3 0.6 1 1.81900086176272 1 +3 20 1 0.8 1 0.903600861762718 1 +3 20 2 0.7 1 0.698000861762718 1 +3 20 3 0.6 1 1.20820086176272 1 +3 21 1 0.8 1 0.504000861762718 1 +3 21 2 0.7 1 0.609600861762718 0 +3 21 3 0.6 1 0.639800861762718 1 +3 22 1 0.8 1 0.719000861762718 1 +3 22 2 0.7 1 0.915600861762718 0 +3 22 3 0.6 1 1.13920086176272 0 +3 23 1 0.8 1 0.867800861762718 1 +3 23 2 0.7 1 0.535600861762718 0 +3 
23 3 0.6 1 1.47320086176272 1 +3 24 1 0.8 1 1.29020086176272 0 +3 24 2 0.7 1 0.697000861762718 1 +3 24 3 0.6 1 2.10740086176272 0 +3 25 1 0.8 1 1.13530086176272 1 +3 25 2 0.7 1 0.985100861762718 1 +3 25 3 0.6 1 0.628700861762718 0 +3 26 1 0.8 1 0.655700861762718 1 +3 26 2 0.7 1 0.599900861762718 1 +3 26 3 0.6 1 0.924500861762718 0 +3 27 1 0.8 1 0.628900861762718 0 +3 27 2 0.7 1 0.523500861762718 1 +3 27 3 0.6 1 0.625100861762718 1 +3 28 1 0.8 1 1.25130086176272 1 +3 28 2 0.7 1 1.06330086176272 1 +3 28 3 0.6 1 0.780700861762718 1 +3 29 1 0.8 1 0.717300861762718 1 +3 29 2 0.7 1 1.69010086176272 1 +3 29 3 0.6 2 1.96170086176272 1 +3 30 1 0.8 1 0.702100861762718 1 +3 30 2 0.7 1 0.688900861762718 0 +3 30 3 0.6 1 0.727300861762718 0 +3 31 1 0.8 1 0.597100861762718 0 +3 31 2 0.7 1 1.32710086176272 1 +3 31 3 0.6 2 0.775500861762718 0 +3 32 1 0.8 1 0.918300861762718 0 +3 32 2 0.7 1 0.798100861762718 1 +3 32 3 0.6 1 0.897700861762718 1 +3 33 1 0.8 1 0.659100861762718 1 +3 33 2 0.7 1 0.686100861762718 1 +3 33 3 0.6 1 0.841300861762718 0 +3 34 1 0.8 1 0.611900861762718 1 +3 34 2 0.7 1 1.41830086176272 0 +3 34 3 0.6 1 0.950500861762718 0 +3 35 1 0.8 1 1.42070086176272 1 +3 35 2 0.7 1 1.47130086176272 1 +3 35 3 0.6 1 1.07090086176272 0 +3 36 1 0.8 1 0.837500861762718 1 +3 36 2 0.7 2 0.898300861762718 0 +3 36 3 0.6 1 0.889700861762718 0 +3 37 1 0.8 1 1.15130086176272 1 +3 37 2 0.7 1 1.06530086176272 0 +3 37 3 0.6 1 1.19990086176272 0 +3 38 1 0.8 1 0.876900861762718 1 +3 38 2 0.7 1 0.721900861762718 1 +3 38 3 0.6 1 0.820900861762718 1 +3 39 1 0.8 1 0.855700861762718 1 +3 39 2 0.7 1 0.678100861762718 1 +3 39 3 0.6 1 2.00350086176272 0 +3 40 1 0.8 1 0.685100861762718 1 +3 40 2 0.7 1 1.70730086176272 0 +3 40 3 0.6 2 1.77670086176272 1 +4 1 1 0.8 2 1.55335209703466 0 +4 1 2 0.7 1 5.90215209703466 0 +4 1 3 0.6 1 4.57375209703466 0 +4 2 1 0.8 2 1.61075209703466 0 +4 2 2 0.7 1 1.26295209703466 1 +4 2 3 0.6 2 1.13075209703466 0 +4 3 1 0.8 1 0.86385209703466 1 +4 3 2 0.7 1 1.45105209703466 
1 +4 3 3 0.6 1 1.45705209703466 0 +4 4 1 0.8 1 0.83565209703466 1 +4 4 2 0.7 1 1.54925209703466 1 +4 4 3 0.6 1 0.68665209703466 1 +4 5 1 0.8 1 0.93145209703466 1 +4 5 2 0.7 1 0.62705209703466 0 +4 5 3 0.6 2 1.47665209703466 0 +4 6 1 0.8 1 0.86925209703466 1 +4 6 2 0.7 1 0.59125209703466 1 +4 6 3 0.6 2 2.53285209703466 1 +4 7 1 0.8 1 1.08875209703466 1 +4 7 2 0.7 1 2.58655209703466 0 +4 7 3 0.6 1 1.83335209703466 1 +4 8 1 0.8 1 0.56375209703466 1 +4 8 2 0.7 1 0.91075209703466 1 +4 8 3 0.6 1 0.63315209703466 1 +4 9 1 0.8 1 0.54235209703466 0 +4 9 2 0.7 2 0.71195209703466 0 +4 9 3 0.6 1 0.65695209703466 1 +4 10 1 0.8 1 0.64055209703466 1 +4 10 2 0.7 1 0.65275209703466 0 +4 10 3 0.6 1 1.06735209703466 1 +4 11 1 0.8 1 0.55315209703466 1 +4 11 2 0.7 1 1.14275209703466 1 +4 11 3 0.6 1 1.53675209703466 1 +4 12 1 0.8 1 0.64225209703466 1 +4 12 2 0.7 1 1.01205209703466 0 +4 12 3 0.6 1 0.76785209703466 1 +4 13 1 0.8 1 0.56595209703466 1 +4 13 2 0.7 1 1.17775209703466 1 +4 13 3 0.6 1 0.69475209703466 1 +4 14 1 0.8 1 0.55135209703466 1 +4 14 2 0.7 1 0.68175209703466 1 +4 14 3 0.6 1 0.75635209703466 0 +4 15 1 0.8 1 0.46525209703466 1 +4 15 2 0.7 1 0.51205209703466 0 +4 15 3 0.6 1 1.01265209703466 1 +4 16 1 0.8 1 0.79415209703466 1 +4 16 2 0.7 1 0.63975209703466 0 +4 16 3 0.6 2 0.94955209703466 0 +4 17 1 0.8 1 0.89505209703466 1 +4 17 2 0.7 1 0.69445209703466 1 +4 17 3 0.6 1 0.64325209703466 0 +4 18 1 0.8 1 0.59165209703466 1 +4 18 2 0.7 1 0.65725209703466 0 +4 18 3 0.6 1 0.77945209703466 1 +4 19 1 0.8 1 0.62955209703466 1 +4 19 2 0.7 1 0.47775209703466 1 +4 19 3 0.6 1 0.45515209703466 0 +4 20 1 0.8 1 0.42245209703466 1 +4 20 2 0.7 1 0.48005209703466 1 +4 20 3 0.6 2 1.11285209703466 0 +4 21 1 0.8 1 0.60595209703466 0 +4 21 2 0.7 1 1.09575209703466 0 +4 21 3 0.6 1 0.73995209703466 0 +4 22 1 0.8 1 0.75995209703466 1 +4 22 2 0.7 1 0.92875209703466 1 +4 22 3 0.6 1 0.65015209703466 0 +4 23 1 0.8 1 0.73985209703466 1 +4 23 2 0.7 1 0.57705209703466 1 +4 23 3 0.6 1 0.54525209703466 1 +4 
24 1 0.8 1 0.65975209703466 1 +4 24 2 0.7 1 0.60935209703466 1 +4 24 3 0.6 1 0.98695209703466 0 +4 25 1 0.8 1 0.53755209703466 1 +4 25 2 0.7 1 0.65435209703466 1 +4 25 3 0.6 1 0.68015209703466 0 +4 26 1 0.8 1 0.57025209703466 1 +4 26 2 0.7 1 0.73745209703466 1 +4 26 3 0.6 1 1.09025209703466 1 +4 27 1 0.8 1 0.84765209703466 1 +4 27 2 0.7 1 0.56165209703466 1 +4 27 3 0.6 1 0.59505209703466 1 +4 28 1 0.8 1 0.45255209703466 1 +4 28 2 0.7 1 0.80435209703466 1 +4 28 3 0.6 1 0.46735209703466 0 +4 29 1 0.8 1 0.45755209703466 1 +4 29 2 0.7 1 0.44415209703466 1 +4 29 3 0.6 2 0.75035209703466 0 +4 30 1 0.8 1 0.66085209703466 0 +4 30 2 0.7 1 0.66125209703466 1 +4 30 3 0.6 1 0.75365209703466 1 +4 31 1 0.8 1 1.11765209703466 1 +4 31 2 0.7 1 0.69705209703466 1 +4 31 3 0.6 1 0.58265209703466 0 +4 32 1 0.8 1 0.45075209703466 1 +4 32 2 0.7 1 0.72935209703466 1 +4 32 3 0.6 1 0.53355209703466 0 +4 33 1 0.8 1 0.48515209703466 1 +4 33 2 0.7 1 0.68515209703466 0 +4 33 3 0.6 1 0.49575209703466 1 +4 34 1 0.8 1 0.47405209703466 1 +4 34 2 0.7 1 0.40865209703466 1 +4 34 3 0.6 1 0.88145209703466 1 +4 35 1 0.8 1 1.24185209703466 1 +4 35 2 0.7 1 0.46005209703466 1 +4 35 3 0.6 1 0.45225209703466 0 +4 36 1 0.8 1 0.54285209703466 1 +4 36 2 0.7 1 0.67825209703466 1 +4 36 3 0.6 1 0.59705209703466 1 +4 37 1 0.8 1 1.08655209703466 0 +4 37 2 0.7 1 0.48395209703466 1 +4 37 3 0.6 1 0.48275209703466 1 +4 38 1 0.8 1 0.49595209703466 0 +4 38 2 0.7 1 0.57595209703466 1 +4 38 3 0.6 1 0.96235209703466 0 +4 39 1 0.8 1 0.44095209703466 1 +4 39 2 0.7 1 0.87995209703466 1 +4 39 3 0.6 1 0.77615209703466 1 +4 40 1 0.8 1 0.52765209703466 1 +4 40 2 0.7 1 0.56585209703466 1 +4 40 3 0.6 1 0.82185209703466 0 +5 1 1 0.8 2 2.15232424837915 1 +5 1 2 0.7 1 2.12592424837915 0 +5 1 3 0.6 2 1.21552424837915 1 +5 2 1 0.8 1 0.767324248379146 1 +5 2 2 0.7 2 1.70452424837915 1 +5 2 3 0.6 2 2.65012424837915 1 +5 3 1 0.8 2 1.37512424837915 0 +5 3 2 0.7 2 4.23012424837915 1 +5 3 3 0.6 1 3.23512424837915 0 +5 4 1 0.8 1 1.44082424837915 
1 +5 4 2 0.7 2 0.525024248379146 0 +5 4 3 0.6 2 2.18122424837915 0 +5 5 1 0.8 2 1.21912424837915 0 +5 5 2 0.7 2 2.44172424837915 0 +5 5 3 0.6 2 1.19572424837915 0 +5 6 1 0.8 1 2.00812424837915 1 +5 6 2 0.7 1 0.653724248379146 1 +5 6 3 0.6 2 0.642124248379146 0 +5 7 1 0.8 2 1.30392424837915 0 +5 7 2 0.7 2 0.604924248379146 1 +5 7 3 0.6 2 0.708324248379146 0 +5 8 1 0.8 1 1.26892424837915 1 +5 8 2 0.7 2 1.17152424837915 0 +5 8 3 0.6 1 0.885724248379146 0 +5 9 1 0.8 2 1.18222424837915 0 +5 9 2 0.7 2 0.563024248379146 1 +5 9 3 0.6 2 1.37122424837915 0 +5 10 1 0.8 2 2.36432424837915 0 +5 10 2 0.7 1 0.562924248379146 0 +5 10 3 0.6 1 0.976324248379146 1 +5 11 1 0.8 1 1.04232424837915 1 +5 11 2 0.7 2 1.74732424837915 1 +5 11 3 0.6 2 0.606124248379146 1 +5 12 1 0.8 2 1.66432424837915 0 +5 12 2 0.7 2 0.573924248379146 0 +5 12 3 0.6 1 1.44732424837915 1 +5 13 1 0.8 1 0.795424248379146 1 +5 13 2 0.7 2 0.784224248379146 0 +5 13 3 0.6 2 2.25522424837915 1 +5 14 1 0.8 1 0.656124248379146 1 +5 14 2 0.7 1 0.617924248379146 1 +5 14 3 0.6 2 0.821524248379146 0 +5 15 1 0.8 2 0.498024248379146 0 +5 15 2 0.7 2 0.603824248379146 0 +5 15 3 0.6 2 1.80202424837915 0 +5 16 1 0.8 2 1.69032424837915 0 +5 16 2 0.7 1 0.568324248379146 1 +5 16 3 0.6 2 0.547524248379146 1 +5 17 1 0.8 1 0.565524248379146 1 +5 17 2 0.7 2 0.460924248379146 0 +5 17 3 0.6 2 0.607124248379146 0 +5 18 1 0.8 1 0.625624248379146 1 +5 18 2 0.7 1 0.614224248379146 1 +5 18 3 0.6 1 2.31662424837915 1 +5 19 1 0.8 1 0.498224248379146 1 +5 19 2 0.7 1 0.590424248379146 0 +5 19 3 0.6 1 1.59482424837915 1 +5 20 1 0.8 2 0.501924248379146 0 +5 20 2 0.7 2 1.22712424837915 0 +5 20 3 0.6 2 0.947324248379146 0 +5 21 1 0.8 1 1.33352424837915 1 +5 21 2 0.7 2 1.04972424837915 1 +5 21 3 0.6 1 0.836324248379146 1 +5 22 1 0.8 1 2.31982424837915 1 +5 22 2 0.7 1 0.959224248379146 0 +5 22 3 0.6 1 1.09002424837915 0 +5 23 1 0.8 1 0.687424248379146 0 +5 23 2 0.7 2 2.47082424837915 0 +5 23 3 0.6 1 0.835024248379146 1 +5 24 1 0.8 1 0.571724248379146 1 
+5 24 2 0.7 2 0.908924248379146 0 +5 24 3 0.6 1 0.533724248379146 1 +5 25 1 0.8 1 0.501724248379146 1 +5 25 2 0.7 1 0.759724248379146 1 +5 25 3 0.6 2 0.620124248379146 1 +5 26 1 0.8 1 0.428424248379146 1 +5 26 2 0.7 1 1.60122424837915 1 +5 26 3 0.6 1 0.630224248379146 0 +5 27 1 0.8 1 0.840024248379146 1 +5 27 2 0.7 2 0.854224248379146 1 +5 27 3 0.6 2 0.463024248379146 0 +5 28 1 0.8 2 0.548024248379146 1 +5 28 2 0.7 1 1.21842424837915 1 +5 28 3 0.6 1 0.538824248379146 0 +5 29 1 0.8 2 1.32692424837915 0 +5 29 2 0.7 2 0.561924248379146 0 +5 29 3 0.6 1 1.09072424837915 1 +5 30 1 0.8 1 0.948924248379146 1 +5 30 2 0.7 2 0.571324248379146 0 +5 30 3 0.6 1 1.53152424837915 0 +5 31 1 0.8 1 0.508124248379146 1 +5 31 2 0.7 1 0.590724248379146 1 +5 31 3 0.6 2 2.13232424837915 0 +5 32 1 0.8 1 1.21162424837915 1 +5 32 2 0.7 1 0.415224248379146 1 +5 32 3 0.6 2 0.624024248379146 1 +5 33 1 0.8 2 0.531624248379146 0 +5 33 2 0.7 1 1.01022424837915 0 +5 33 3 0.6 1 1.17362424837915 1 +5 34 1 0.8 1 0.532824248379146 0 +5 34 2 0.7 2 2.26962424837915 1 +5 34 3 0.6 2 1.02002424837915 0 +5 35 1 0.8 1 1.46872424837915 1 +5 35 2 0.7 1 1.26372424837915 1 +5 35 3 0.6 2 2.94952424837915 1 +5 36 1 0.8 1 0.913124248379146 1 +5 36 2 0.7 1 2.03832424837915 1 +5 36 3 0.6 2 0.663724248379146 1 +5 37 1 0.8 1 1.01812424837915 1 +5 37 2 0.7 1 1.73332424837915 1 +5 37 3 0.6 2 1.06512424837915 0 +5 38 1 0.8 1 0.484124248379146 1 +5 38 2 0.7 2 0.744524248379146 0 +5 38 3 0.6 1 0.576924248379146 0 +5 39 1 0.8 1 0.461224248379146 1 +5 39 2 0.7 2 1.46702424837915 0 +5 39 3 0.6 1 1.65202424837915 0 +5 40 1 0.8 1 1.03042424837915 1 +5 40 2 0.7 2 0.498024248379146 0 +5 40 3 0.6 2 0.611624248379146 0 +6 1 1 0.8 1 0.880250206493226 1 +6 1 2 0.7 1 0.573050206493226 1 +6 1 3 0.6 2 2.15025020649323 1 +6 2 1 0.8 2 1.11695020649323 0 +6 2 2 0.7 1 1.25795020649323 1 +6 2 3 0.6 2 1.11415020649323 1 +6 3 1 0.8 2 0.875150206493226 0 +6 3 2 0.7 1 2.92855020649323 0 +6 3 3 0.6 2 1.46775020649323 0 +6 4 1 0.8 1 1.11855020649323 
1 +6 4 2 0.7 1 0.827950206493226 0 +6 4 3 0.6 2 1.44015020649323 0 +6 5 1 0.8 1 0.991050206493226 1 +6 5 2 0.7 1 0.925650206493226 1 +6 5 3 0.6 2 1.22025020649323 0 +6 6 1 0.8 1 0.778450206493226 1 +6 6 2 0.7 1 1.33665020649323 1 +6 6 3 0.6 2 1.19545020649323 0 +6 7 1 0.8 1 1.34465020649323 1 +6 7 2 0.7 1 1.23945020649323 1 +6 7 3 0.6 1 1.38805020649323 1 +6 8 1 0.8 1 0.494450206493226 1 +6 8 2 0.7 1 1.88845020649323 1 +6 8 3 0.6 2 1.77365020649323 1 +6 9 1 0.8 1 0.505850206493226 1 +6 9 2 0.7 1 0.992050206493226 1 +6 9 3 0.6 2 0.722450206493226 1 +6 10 1 0.8 1 0.742950206493226 0 +6 10 2 0.7 1 0.561950206493226 0 +6 10 3 0.6 1 1.23795020649323 1 +6 11 1 0.8 1 0.769450206493226 0 +6 11 2 0.7 1 0.541650206493226 1 +6 11 3 0.6 2 1.05565020649323 1 +6 12 1 0.8 1 0.770950206493226 1 +6 12 2 0.7 1 1.19815020649323 1 +6 12 3 0.6 2 0.590150206493226 1 +6 13 1 0.8 1 0.694650206493226 0 +6 13 2 0.7 1 0.589050206493226 1 +6 13 3 0.6 2 0.499450206493226 1 +6 14 1 0.8 1 0.583150206493226 0 +6 14 2 0.7 1 0.654750206493226 1 +6 14 3 0.6 2 0.799350206493226 1 +6 15 1 0.8 1 0.867050206493226 1 +6 15 2 0.7 1 0.823050206493226 0 +6 15 3 0.6 1 1.09045020649323 1 +6 16 1 0.8 1 0.766150206493226 0 +6 16 2 0.7 1 0.609950206493226 1 +6 16 3 0.6 2 0.639150206493226 1 +6 17 1 0.8 1 0.633750206493226 1 +6 17 2 0.7 1 0.776950206493226 1 +6 17 3 0.6 2 1.19635020649323 0 +6 18 1 0.8 1 0.697050206493226 1 +6 18 2 0.7 1 0.564050206493226 1 +6 18 3 0.6 2 0.925250206493226 0 +6 19 1 0.8 1 0.621550206493226 1 +6 19 2 0.7 1 0.581150206493226 1 +6 19 3 0.6 2 0.572150206493226 0 +6 20 1 0.8 1 0.681150206493226 0 +6 20 2 0.7 1 0.650750206493226 0 +6 20 3 0.6 1 3.04935020649323 1 +6 21 1 0.8 1 0.812450206493226 1 +6 21 2 0.7 1 0.861650206493226 1 +6 21 3 0.6 2 1.37585020649323 0 +6 22 1 0.8 1 0.756850206493226 1 +6 22 2 0.7 1 0.707450206493226 1 +6 22 3 0.6 2 0.977850206493226 0 +6 23 1 0.8 1 0.837350206493226 1 +6 23 2 0.7 1 0.489150206493226 0 +6 23 3 0.6 2 0.765550206493226 1 +6 24 1 0.8 1 
0.654350206493226 0 +6 24 2 0.7 1 0.517550206493226 1 +6 24 3 0.6 2 0.928350206493226 0 +6 25 1 0.8 1 0.644450206493226 1 +6 25 2 0.7 1 1.09825020649323 1 +6 25 3 0.6 2 3.37645020649323 0 +6 26 1 0.8 1 1.04365020649323 1 +6 26 2 0.7 1 0.479250206493226 1 +6 26 3 0.6 2 0.609650206493226 0 +6 27 1 0.8 1 0.509150206493226 1 +6 27 2 0.7 1 0.638150206493226 0 +6 27 3 0.6 2 0.522350206493226 1 +6 28 1 0.8 1 0.586150206493226 0 +6 28 2 0.7 1 0.612550206493226 1 +6 28 3 0.6 2 0.872750206493226 0 +6 29 1 0.8 1 0.524250206493226 1 +6 29 2 0.7 1 0.965250206493226 1 +6 29 3 0.6 2 0.530050206493226 1 +6 30 1 0.8 1 0.639050206493226 1 +6 30 2 0.7 1 0.533250206493226 1 +6 30 3 0.6 2 1.01845020649323 1 +6 31 1 0.8 1 0.792050206493226 1 +6 31 2 0.7 1 0.772650206493226 1 +6 31 3 0.6 2 0.815050206493226 1 +6 32 1 0.8 1 0.535350206493226 1 +6 32 2 0.7 1 0.705550206493226 1 +6 32 3 0.6 2 0.663950206493226 0 +6 33 1 0.8 1 0.525750206493226 1 +6 33 2 0.7 1 0.680350206493226 1 +6 33 3 0.6 2 0.826350206493226 0 +6 34 1 0.8 1 0.695750206493226 1 +6 34 2 0.7 1 0.594950206493226 1 +6 34 3 0.6 2 0.542350206493226 0 +6 35 1 0.8 1 0.797250206493226 0 +6 35 2 0.7 1 0.779650206493226 1 +6 35 3 0.6 1 1.00865020649323 0 +6 36 1 0.8 1 1.09685020649323 1 +6 36 2 0.7 1 0.524050206493226 0 +6 36 3 0.6 1 1.39545020649323 0 +6 37 1 0.8 1 0.737850206493226 1 +6 37 2 0.7 1 0.480050206493226 0 +6 37 3 0.6 2 0.837450206493226 0 +6 38 1 0.8 1 0.716650206493226 0 +6 38 2 0.7 1 0.552450206493226 1 +6 38 3 0.6 2 0.806450206493226 0 +6 39 1 0.8 1 0.804950206493226 1 +6 39 2 0.7 1 0.908350206493226 1 +6 39 3 0.6 2 1.14695020649323 0 +6 40 1 0.8 1 0.822150206493226 1 +6 40 2 0.7 1 0.567550206493226 0 +6 40 3 0.6 2 1.13315020649323 1 +7 1 1 0.8 2 0.634306566786801 1 +7 1 2 0.7 2 0.637306566786801 0 +7 1 3 0.6 1 1.6463065667868 1 +7 2 1 0.8 1 0.516506566786801 1 +7 2 2 0.7 1 0.900506566786801 0 +7 2 3 0.6 1 3.5061065667868 1 +7 3 1 0.8 1 0.737906566786801 1 +7 3 2 0.7 1 0.515106566786801 1 +7 3 3 0.6 2 
0.758306566786801 1 +7 4 1 0.8 1 2.0164065667868 1 +7 4 2 0.7 1 0.676006566786801 0 +7 4 3 0.6 1 0.845206566786801 1 +7 5 1 0.8 2 0.968206566786801 0 +7 5 2 0.7 2 2.1332065667868 1 +7 5 3 0.6 1 1.3870065667868 1 +7 6 1 0.8 2 0.997406566786801 1 +7 6 2 0.7 2 1.3432065667868 1 +7 6 3 0.6 1 0.971606566786801 1 +7 7 1 0.8 2 1.9666065667868 0 +7 7 2 0.7 2 0.662006566786801 1 +7 7 3 0.6 1 1.5368065667868 1 +7 8 1 0.8 1 1.7015065667868 1 +7 8 2 0.7 1 1.1395065667868 1 +7 8 3 0.6 2 0.985906566786801 0 +7 9 1 0.8 2 1.3025065667868 0 +7 9 2 0.7 2 3.2421065667868 0 +7 9 3 0.6 1 0.729306566786801 0 +7 10 1 0.8 1 0.784406566786801 1 +7 10 2 0.7 1 0.804406566786801 1 +7 10 3 0.6 1 1.0492065667868 1 +7 11 1 0.8 1 0.585906566786801 1 +7 11 2 0.7 1 1.4419065667868 1 +7 11 3 0.6 1 3.1283065667868 1 +7 12 1 0.8 1 0.976406566786801 1 +7 12 2 0.7 2 1.6264065667868 0 +7 12 3 0.6 1 0.624606566786801 1 +7 13 1 0.8 1 0.655306566786801 1 +7 13 2 0.7 2 3.7217065667868 0 +7 13 3 0.6 1 1.8883065667868 1 +7 14 1 0.8 1 1.6824065667868 1 +7 14 2 0.7 1 3.5998065667868 1 +7 14 3 0.6 1 0.871206566786801 1 +7 15 1 0.8 1 0.619406566786801 0 +7 15 2 0.7 2 1.8376065667868 0 +7 15 3 0.6 1 2.3466065667868 0 +7 16 1 0.8 1 1.1623065667868 1 +7 16 2 0.7 2 0.664306566786801 0 +7 16 3 0.6 1 0.958106566786801 1 +7 17 1 0.8 1 0.676006566786801 1 +7 17 2 0.7 1 2.4412065667868 1 +7 17 3 0.6 1 0.613806566786801 0 +7 18 1 0.8 1 1.2304065667868 1 +7 18 2 0.7 1 1.1590065667868 0 +7 18 3 0.6 1 1.1772065667868 0 +7 19 1 0.8 1 0.994706566786801 1 +7 19 2 0.7 1 0.922506566786801 1 +7 19 3 0.6 1 0.910906566786801 0 +7 20 1 0.8 1 1.1262065667868 1 +7 20 2 0.7 2 1.1290065667868 0 +7 20 3 0.6 1 1.5526065667868 1 +7 21 1 0.8 1 1.0252065667868 1 +7 21 2 0.7 1 1.0090065667868 1 +7 21 3 0.6 1 0.913006566786801 1 +7 22 1 0.8 1 1.9655065667868 0 +7 22 2 0.7 1 3.7057065667868 1 +7 22 3 0.6 1 0.796706566786801 1 +7 23 1 0.8 1 0.938306566786801 1 +7 23 2 0.7 1 0.789706566786801 0 +7 23 3 0.6 1 0.544706566786801 0 +7 24 1 0.8 1 
2.7898065667868 0 +7 24 2 0.7 1 0.943006566786801 1 +7 24 3 0.6 1 2.0538065667868 0 +7 25 1 0.8 1 1.4716065667868 1 +7 25 2 0.7 1 0.854606566786801 1 +7 25 3 0.6 1 1.4836065667868 1 +7 26 1 0.8 1 0.572706566786801 1 +7 26 2 0.7 1 0.795506566786801 1 +7 26 3 0.6 1 3.0083065667868 0 +7 27 1 0.8 1 0.921906566786801 1 +7 27 2 0.7 1 1.0699065667868 1 +7 27 3 0.6 1 0.991506566786801 1 +7 28 1 0.8 1 0.941806566786801 1 +7 28 2 0.7 2 1.0258065667868 0 +7 28 3 0.6 1 0.905806566786801 1 +7 29 1 0.8 1 1.1124065667868 1 +7 29 2 0.7 1 1.5928065667868 1 +7 29 3 0.6 1 0.740006566786801 0 +7 30 1 0.8 1 1.1505065667868 0 +7 30 2 0.7 1 0.666906566786801 1 +7 30 3 0.6 1 0.528306566786801 0 +7 31 1 0.8 1 0.950506566786801 1 +7 31 2 0.7 1 0.573106566786801 1 +7 31 3 0.6 1 0.763106566786801 1 +7 32 1 0.8 1 1.8367065667868 0 +7 32 2 0.7 1 0.708706566786801 1 +7 32 3 0.6 1 0.734906566786801 1 +7 33 1 0.8 1 0.610606566786801 1 +7 33 2 0.7 1 0.867406566786801 0 +7 33 3 0.6 1 1.3982065667868 1 +7 34 1 0.8 1 1.1018065667868 1 +7 34 2 0.7 1 1.2236065667868 0 +7 34 3 0.6 1 1.1814065667868 1 +7 35 1 0.8 1 0.752306566786801 1 +7 35 2 0.7 1 0.549306566786801 0 +7 35 3 0.6 1 1.2239065667868 0 +7 36 1 0.8 1 1.1991065667868 0 +7 36 2 0.7 1 2.0213065667868 1 +7 36 3 0.6 1 1.0655065667868 0 +7 37 1 0.8 1 0.703106566786801 1 +7 37 2 0.7 1 0.882106566786801 1 +7 37 3 0.6 1 0.980106566786801 0 +7 38 1 0.8 1 0.946606566786801 0 +7 38 2 0.7 1 1.1252065667868 1 +7 38 3 0.6 1 1.3188065667868 0 +7 39 1 0.8 1 0.761806566786801 1 +7 39 2 0.7 1 0.720206566786801 1 +7 39 3 0.6 1 0.908606566786801 1 +7 40 1 0.8 1 1.0496065667868 0 +7 40 2 0.7 1 1.2116065667868 0 +7 40 3 0.6 1 1.2790065667868 1 +8 1 1 0.8 2 1.81115027424882 1 +8 1 2 0.7 1 0.491350274248821 1 +8 1 3 0.6 1 0.502150274248821 0 +8 2 1 0.8 2 0.520050274248821 0 +8 2 2 0.7 1 0.507250274248821 0 +8 2 3 0.6 1 1.05305027424882 1 +8 3 1 0.8 2 0.885150274248821 0 +8 3 2 0.7 1 1.47375027424882 1 +8 3 3 0.6 2 1.80835027424882 0 +8 4 1 0.8 1 0.511350274248821 0 
+8 4 2 0.7 1 2.03935027424882 1 +8 4 3 0.6 1 0.862150274248821 1 +8 5 1 0.8 2 0.539550274248821 0 +8 5 2 0.7 1 0.624550274248821 1 +8 5 3 0.6 1 0.629350274248821 1 +8 6 1 0.8 1 0.728250274248821 1 +8 6 2 0.7 1 0.445650274248821 0 +8 6 3 0.6 2 0.857450274248821 0 +8 7 1 0.8 2 0.505050274248821 0 +8 7 2 0.7 1 0.547450274248821 1 +8 7 3 0.6 1 0.559650274248821 0 +8 8 1 0.8 2 0.849350274248821 0 +8 8 2 0.7 1 1.17795027424882 1 +8 8 3 0.6 1 0.514150274248821 1 +8 9 1 0.8 2 0.512250274248821 1 +8 9 2 0.7 1 0.583850274248821 1 +8 9 3 0.6 1 1.07265027424882 1 +8 10 1 0.8 1 0.786550274248821 1 +8 10 2 0.7 1 0.959950274248821 0 +8 10 3 0.6 1 0.463750274248821 1 +8 11 1 0.8 2 1.06285027424882 0 +8 11 2 0.7 1 0.762650274248821 0 +8 11 3 0.6 1 0.398050274248821 0 +8 12 1 0.8 1 1.02345027424882 0 +8 12 2 0.7 1 0.634050274248821 1 +8 12 3 0.6 1 1.84525027424882 1 +8 13 1 0.8 1 0.632350274248821 1 +8 13 2 0.7 1 0.644550274248821 1 +8 13 3 0.6 1 0.690950274248821 1 +8 14 1 0.8 2 1.15305027424882 0 +8 14 2 0.7 1 0.653050274248821 0 +8 14 3 0.6 1 0.447450274248821 1 +8 15 1 0.8 2 0.970650274248821 0 +8 15 2 0.7 2 0.452650274248821 0 +8 15 3 0.6 1 0.723850274248821 0 +8 16 1 0.8 1 0.825350274248821 1 +8 16 2 0.7 1 1.24415027424882 1 +8 16 3 0.6 2 0.696950274248821 1 +8 17 1 0.8 1 2.38645027424882 1 +8 17 2 0.7 1 0.521650274248821 0 +8 17 3 0.6 1 0.569250274248821 0 +8 18 1 0.8 1 3.44565027424882 1 +8 18 2 0.7 1 2.36265027424882 1 +8 18 3 0.6 1 0.616450274248821 0 +8 19 1 0.8 1 1.48875027424882 1 +8 19 2 0.7 1 0.546550274248821 1 +8 19 3 0.6 1 0.708950274248821 0 +8 20 1 0.8 1 0.428150274248821 1 +8 20 2 0.7 1 1.88335027424882 1 +8 20 3 0.6 1 0.488750274248821 1 +8 21 1 0.8 1 1.56505027424882 1 +8 21 2 0.7 2 1.46965027424882 0 +8 21 3 0.6 1 0.621050274248821 0 +8 22 1 0.8 1 3.14265027424882 1 +8 22 2 0.7 1 0.692650274248821 1 +8 22 3 0.6 1 0.602450274248821 0 +8 23 1 0.8 2 1.16645027424882 1 +8 23 2 0.7 1 0.631250274248821 1 +8 23 3 0.6 1 0.527450274248821 1 +8 24 1 0.8 1 
1.11955027424882 1 +8 24 2 0.7 1 1.50895027424882 1 +8 24 3 0.6 1 1.56215027424882 0 +8 25 1 0.8 2 1.55035027424882 0 +8 25 2 0.7 1 0.570550274248821 0 +8 25 3 0.6 1 0.547750274248821 1 +8 26 1 0.8 1 1.21685027424882 1 +8 26 2 0.7 1 1.09245027424882 0 +8 26 3 0.6 1 0.574450274248821 0 +8 27 1 0.8 1 0.992450274248821 1 +8 27 2 0.7 2 1.54265027424882 1 +8 27 3 0.6 1 1.68965027424882 1 +8 28 1 0.8 1 0.541850274248821 1 +8 28 2 0.7 1 1.02105027424882 1 +8 28 3 0.6 1 1.40785027424882 0 +8 29 1 0.8 1 0.574950274248821 1 +8 29 2 0.7 1 1.19635027424882 1 +8 29 3 0.6 1 1.62055027424882 1 +8 30 1 0.8 2 1.11495027424882 1 +8 30 2 0.7 1 0.637750274248821 0 +8 30 3 0.6 1 0.795550274248821 1 +8 31 1 0.8 1 0.705350274248821 1 +8 31 2 0.7 1 2.55415027424882 1 +8 31 3 0.6 1 1.93915027424882 0 +8 32 1 0.8 1 0.867450274248821 1 +8 32 2 0.7 2 1.01685027424882 0 +8 32 3 0.6 1 2.54405027424882 1 +8 33 1 0.8 1 0.506250274248821 1 +8 33 2 0.7 1 0.698450274248821 0 +8 33 3 0.6 1 0.583850274248821 1 +8 34 1 0.8 1 0.995850274248821 1 +8 34 2 0.7 1 1.97065027424882 0 +8 34 3 0.6 1 0.583650274248821 1 +8 35 1 0.8 2 0.866550274248821 0 +8 35 2 0.7 1 0.888150274248821 1 +8 35 3 0.6 2 0.588350274248821 0 +8 36 1 0.8 1 1.15855027424882 0 +8 36 2 0.7 1 0.985150274248821 1 +8 36 3 0.6 1 0.608550274248821 0 +8 37 1 0.8 1 0.900350274248821 1 +8 37 2 0.7 1 1.49515027424882 0 +8 37 3 0.6 1 0.827550274248821 1 +8 38 1 0.8 2 1.49375027424882 1 +8 38 2 0.7 1 0.970750274248821 1 +8 38 3 0.6 1 0.611150274248821 1 +8 39 1 0.8 1 0.681650274248821 0 +8 39 2 0.7 1 1.09465027424882 1 +8 39 3 0.6 1 0.791050274248821 1 +8 40 1 0.8 1 2.75305027424882 0 +8 40 2 0.7 1 0.941650274248821 1 +8 40 3 0.6 1 0.647250274248821 1 +9 1 1 0.8 2 1.25235645648248 0 +9 1 2 0.7 1 1.70315645648248 1 +9 1 3 0.6 2 0.427156456482476 0 +9 2 1 0.8 2 0.687256456482476 0 +9 2 2 0.7 2 0.989056456482476 0 +9 2 3 0.6 2 1.13685645648248 0 +9 3 1 0.8 2 3.53645645648248 0 +9 3 2 0.7 1 0.892256456482476 1 +9 3 3 0.6 2 0.615656456482476 0 +9 4 1 
0.8 2 1.71065645648248 0 +9 4 2 0.7 2 0.686656456482476 1 +9 4 3 0.6 1 0.918856456482476 1 +9 5 1 0.8 1 1.04835645648248 1 +9 5 2 0.7 2 0.606156456482476 0 +9 5 3 0.6 2 1.27415645648248 0 +9 6 1 0.8 2 3.95675645648248 0 +9 6 2 0.7 2 1.13855645648248 0 +9 6 3 0.6 2 0.744556456482476 1 +9 7 1 0.8 1 0.926356456482476 1 +9 7 2 0.7 1 0.918956456482476 1 +9 7 3 0.6 1 1.10415645648248 1 +9 8 1 0.8 1 0.810956456482476 1 +9 8 2 0.7 1 1.41195645648248 0 +9 8 3 0.6 2 0.950956456482476 0 +9 9 1 0.8 2 2.05635645648248 0 +9 9 2 0.7 1 0.753756456482476 1 +9 9 3 0.6 2 1.09095645648248 0 +9 10 1 0.8 1 1.15895645648248 1 +9 10 2 0.7 1 0.689356456482476 1 +9 10 3 0.6 1 2.42595645648248 0 +9 11 1 0.8 1 0.494156456482476 1 +9 11 2 0.7 1 2.37535645648248 1 +9 11 3 0.6 2 1.26455645648248 0 +9 12 1 0.8 1 0.845356456482476 1 +9 12 2 0.7 2 1.33035645648248 0 +9 12 3 0.6 1 1.02035645648248 1 +9 13 1 0.8 2 0.625056456482476 0 +9 13 2 0.7 1 0.874456456482476 1 +9 13 3 0.6 1 1.47845645648248 0 +9 14 1 0.8 1 1.06705645648248 1 +9 14 2 0.7 1 0.537856456482476 1 +9 14 3 0.6 1 0.997856456482476 0 +9 15 1 0.8 1 3.70945645648248 1 +9 15 2 0.7 2 0.719656456482476 0 +9 15 3 0.6 1 0.611656456482476 1 +9 16 1 0.8 2 1.22345645648248 0 +9 16 2 0.7 2 1.10465645648248 0 +9 16 3 0.6 2 1.26825645648248 1 +9 17 1 0.8 2 0.671256456482476 1 +9 17 2 0.7 1 1.40725645648248 1 +9 17 3 0.6 1 2.00765645648248 1 +9 18 1 0.8 1 3.23765645648248 1 +9 18 2 0.7 2 0.717256456482476 0 +9 18 3 0.6 1 0.579856456482476 1 +9 19 1 0.8 1 1.03065645648248 1 +9 19 2 0.7 2 1.32925645648248 0 +9 19 3 0.6 1 1.08085645648248 0 +9 20 1 0.8 2 1.16245645648248 1 +9 20 2 0.7 2 1.26465645648248 0 +9 20 3 0.6 2 1.02565645648248 1 +9 21 1 0.8 2 0.491856456482476 0 +9 21 2 0.7 1 1.11565645648248 1 +9 21 3 0.6 1 1.12025645648248 0 +9 22 1 0.8 2 2.07045645648248 0 +9 22 2 0.7 1 2.22925645648248 1 +9 22 3 0.6 2 3.10185645648248 0 +9 23 1 0.8 1 0.442456456482476 1 +9 23 2 0.7 1 1.86145645648248 1 +9 23 3 0.6 1 1.86025645648248 1 +9 24 1 0.8 2 
0.725056456482476 0 +9 24 2 0.7 2 0.889056456482476 0 +9 24 3 0.6 2 0.911256456482476 0 +9 25 1 0.8 1 1.14765645648248 0 +9 25 2 0.7 1 1.89425645648248 1 +9 25 3 0.6 1 1.10525645648248 1 +9 26 1 0.8 1 0.714456456482476 1 +9 26 2 0.7 1 0.732856456482476 0 +9 26 3 0.6 2 0.591456456482476 1 +9 27 1 0.8 2 1.48765645648248 0 +9 27 2 0.7 2 0.695456456482476 0 +9 27 3 0.6 2 0.551056456482476 0 +9 28 1 0.8 2 1.60665645648248 0 +9 28 2 0.7 1 0.891256456482476 1 +9 28 3 0.6 2 1.02565645648248 0 +9 29 1 0.8 1 2.44865645648248 0 +9 29 2 0.7 2 0.575656456482476 0 +9 29 3 0.6 1 1.46465645648248 1 +9 30 1 0.8 1 1.17365645648248 1 +9 30 2 0.7 1 0.449856456482476 1 +9 30 3 0.6 2 0.983056456482476 1 +9 31 1 0.8 1 1.60705645648248 1 +9 31 2 0.7 1 0.857656456482476 0 +9 31 3 0.6 2 1.48065645648248 1 +9 32 1 0.8 2 0.901256456482476 0 +9 32 2 0.7 2 1.29285645648248 0 +9 32 3 0.6 1 1.33025645648248 1 +9 33 1 0.8 1 0.650656456482476 0 +9 33 2 0.7 1 1.22345645648248 0 +9 33 3 0.6 2 0.714456456482476 1 +9 34 1 0.8 2 0.584256456482476 0 +9 34 2 0.7 2 1.04845645648248 1 +9 34 3 0.6 2 0.587856456482476 1 +9 35 1 0.8 2 0.700256456482476 0 +9 35 2 0.7 1 2.67425645648248 1 +9 35 3 0.6 1 2.48265645648248 1 +9 36 1 0.8 1 0.467756456482476 1 +9 36 2 0.7 1 1.39375645648248 1 +9 36 3 0.6 2 1.96095645648248 1 +9 37 1 0.8 1 2.37215645648248 1 +9 37 2 0.7 2 0.755956456482476 0 +9 37 3 0.6 2 0.704956456482476 0 +9 38 1 0.8 2 0.871956456482476 0 +9 38 2 0.7 1 1.22935645648248 1 +9 38 3 0.6 1 0.619356456482476 1 +9 39 1 0.8 2 0.764556456482476 1 +9 39 2 0.7 2 0.619156456482476 0 +9 39 3 0.6 1 0.495156456482476 1 +9 40 1 0.8 1 0.641956456482476 1 +9 40 2 0.7 1 0.798156456482476 0 +9 40 3 0.6 2 1.41735645648248 1 +10 1 1 0.8 1 3.59942177359996 1 +10 1 2 0.7 2 1.50682177359996 0 +10 1 3 0.6 1 2.69762177359996 1 +10 2 1 0.8 1 0.872321773599958 0 +10 2 2 0.7 2 1.09392177359996 1 +10 2 3 0.6 1 0.712121773599958 1 +10 3 1 0.8 2 6.36002177359996 0 +10 3 2 0.7 1 1.01982177359996 1 +10 3 3 0.6 2 2.87722177359996 1 
+10 4 1 0.8 1 2.17332177359996 1 +10 4 2 0.7 2 0.563121773599958 0 +10 4 3 0.6 2 0.572721773599958 0 +10 5 1 0.8 1 1.22162177359996 0 +10 5 2 0.7 1 0.938421773599958 1 +10 5 3 0.6 2 2.36562177359996 0 +10 6 1 0.8 1 1.00942177359996 0 +10 6 2 0.7 1 0.847421773599958 0 +10 6 3 0.6 2 1.63102177359996 0 +10 7 1 0.8 2 1.65212177359996 0 +10 7 2 0.7 1 1.90692177359996 1 +10 7 3 0.6 1 1.34452177359996 1 +10 8 1 0.8 2 1.04962177359996 0 +10 8 2 0.7 2 1.36742177359996 1 +10 8 3 0.6 1 0.649221773599958 1 +10 9 1 0.8 2 0.936921773599958 0 +10 9 2 0.7 1 0.868921773599958 1 +10 9 3 0.6 1 0.907121773599958 1 +10 10 1 0.8 1 2.88082177359996 1 +10 10 2 0.7 1 1.01302177359996 0 +10 10 3 0.6 1 1.51682177359996 0 +10 11 1 0.8 1 2.81232177359996 1 +10 11 2 0.7 2 3.12152177359996 0 +10 11 3 0.6 1 1.04892177359996 1 +10 12 1 0.8 1 1.34312177359996 1 +10 12 2 0.7 2 3.42312177359996 0 +10 12 3 0.6 2 0.600521773599958 0 +10 13 1 0.8 2 3.11902177359996 0 +10 13 2 0.7 1 1.85302177359996 1 +10 13 3 0.6 2 1.13522177359996 0 +10 14 1 0.8 1 2.17632177359996 0 +10 14 2 0.7 1 0.603521773599958 1 +10 14 3 0.6 1 2.07772177359996 1 +10 15 1 0.8 1 1.46692177359996 0 +10 15 2 0.7 1 1.05472177359996 1 +10 15 3 0.6 1 1.34192177359996 0 +10 16 1 0.8 1 1.06682177359996 1 +10 16 2 0.7 1 0.967821773599958 1 +10 16 3 0.6 1 0.660621773599958 1 +10 17 1 0.8 1 1.22622177359996 1 +10 17 2 0.7 1 1.30662177359996 1 +10 17 3 0.6 1 1.40702177359996 1 +10 18 1 0.8 2 1.34992177359996 0 +10 18 2 0.7 1 1.25652177359996 1 +10 18 3 0.6 1 0.917121773599958 1 +10 19 1 0.8 2 1.13932177359996 0 +10 19 2 0.7 2 1.48252177359996 1 +10 19 3 0.6 1 0.736321773599958 1 +10 20 1 0.8 1 0.810721773599958 1 +10 20 2 0.7 2 0.672121773599958 1 +10 20 3 0.6 2 0.879121773599958 0 +10 21 1 0.8 1 0.458221773599958 1 +10 21 2 0.7 2 0.740421773599958 1 +10 21 3 0.6 1 1.17642177359996 0 +10 22 1 0.8 2 1.89082177359996 0 +10 22 2 0.7 1 0.528621773599958 1 +10 22 3 0.6 1 1.46562177359996 0 +10 23 1 0.8 2 1.52042177359996 0 +10 23 2 0.7 2 
0.711821773599958 1 +10 23 3 0.6 1 0.471021773599958 0 +10 24 1 0.8 1 1.71992177359996 1 +10 24 2 0.7 1 1.13572177359996 1 +10 24 3 0.6 2 0.546121773599958 0 +10 25 1 0.8 1 1.50772177359996 1 +10 25 2 0.7 2 0.778921773599958 0 +10 25 3 0.6 2 1.25552177359996 1 +10 26 1 0.8 1 1.25172177359996 1 +10 26 2 0.7 1 2.07432177359996 1 +10 26 3 0.6 2 0.559921773599958 0 +10 27 1 0.8 1 0.964621773599958 1 +10 27 2 0.7 2 1.75782177359996 0 +10 27 3 0.6 1 0.663021773599958 1 +10 28 1 0.8 1 0.574021773599958 1 +10 28 2 0.7 1 0.574221773599958 1 +10 28 3 0.6 1 1.54262177359996 1 +10 29 1 0.8 1 0.973421773599958 1 +10 29 2 0.7 1 1.14662177359996 1 +10 29 3 0.6 1 0.745821773599958 0 +10 30 1 0.8 1 1.69322177359996 1 +10 30 2 0.7 2 0.811221773599958 0 +10 30 3 0.6 2 0.929821773599958 1 +10 31 1 0.8 1 1.84872177359996 1 +10 31 2 0.7 1 2.19352177359996 0 +10 31 3 0.6 1 1.44532177359996 0 +10 32 1 0.8 1 0.987721773599958 1 +10 32 2 0.7 1 0.763721773599958 0 +10 32 3 0.6 1 1.10752177359996 0 +10 33 1 0.8 2 1.06732177359996 0 +10 33 2 0.7 1 1.40272177359996 0 +10 33 3 0.6 1 0.794721773599958 1 +10 34 1 0.8 1 0.933321773599958 1 +10 34 2 0.7 1 0.568321773599958 0 +10 34 3 0.6 2 0.492521773599958 1 +10 35 1 0.8 1 1.46192177359996 1 +10 35 2 0.7 1 0.741721773599958 1 +10 35 3 0.6 1 1.29592177359996 0 +10 36 1 0.8 1 0.973821773599958 1 +10 36 2 0.7 1 2.13962177359996 1 +10 36 3 0.6 2 2.16222177359996 1 +10 37 1 0.8 2 0.564621773599958 0 +10 37 2 0.7 2 3.40402177359996 1 +10 37 3 0.6 2 0.690021773599958 1 +10 38 1 0.8 1 0.603021773599958 1 +10 38 2 0.7 1 0.940221773599958 0 +10 38 3 0.6 2 0.581221773599958 0 +10 39 1 0.8 1 0.808021773599958 1 +10 39 2 0.7 1 0.978021773599958 1 +10 39 3 0.6 1 0.642221773599958 1 +10 40 1 0.8 1 0.936821773599958 1 +10 40 2 0.7 2 0.882821773599958 1 +10 40 3 0.6 1 0.657421773599958 1 diff --git a/commons/models/pstRT_ddm.yml b/commons/models/pstRT_ddm.yml new file mode 100644 index 00000000..00124cc6 --- /dev/null +++ b/commons/models/pstRT_ddm.yml @@ -0,0 
+1,44 @@ +task_name: + code: pstRT + desc: Probabilistic Selection Task (with RT data) + cite: + - 'Frank, M. J., Santamaria, A., O''Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599.' + - 'Frank, M. J., Seeberger, L. C., & O''reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943.' +model_name: + code: ddm + desc: Drift Diffusion Model + cite: + - Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. +model_type: + code: '' + desc: Hierarchical +data_columns: + subjID: A unique identifier for each subject in the data-set. # Required + cond: Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3). + choice: Integer value representing the option chosen on the given trial (1 or 2). + RT: Float value representing the time taken for the response on the given trial. +parameters: + a: + desc: boundary separation + info: [0, 1.8, 'Inf'] + tau: + desc: non-decision time + info: [0, 0.3, 'Inf'] + d1: + desc: drift rate scaling + info: ['-Inf', 0.8, 'Inf'] + d2: + desc: drift rate scaling + info: ['-Inf', 0.4, 'Inf'] + d3: + desc: drift rate scaling + info: ['-Inf', 0.3, 'Inf'] +regressors: +postpreds: +- choice_os +- RT_os +additional_args: +- code: RTbound + default: 0.1 + desc: Floating point value representing the lower bound (i.e., minimum allowed) + reaction time. Defaults to 0.1 (100 milliseconds). diff --git a/commons/models/pstRT_rlddm1.yml b/commons/models/pstRT_rlddm1.yml new file mode 100644 index 00000000..bddb1171 --- /dev/null +++ b/commons/models/pstRT_rlddm1.yml @@ -0,0 +1,51 @@ +task_name: + code: pstRT + desc: Probabilistic Selection Task (with RT data) + cite: + - 'Frank, M. 
J., Santamaria, A., O''Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599.' + - 'Frank, M. J., Seeberger, L. C., & O''reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943.' +model_name: + code: rlddm1 + desc: Reinforcement Learning Drift Diffusion Model 1 + cite: + - Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. +model_type: + code: '' + desc: Hierarchical +data_columns: + subjID: A unique identifier for each subject in the data-set. # Required + cond: Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3). + prob: 'Float value representing the probability that a correct response (1) is rewarded in the current task condition.' + choice: Integer value representing the option chosen on the given trial (1 or 2). + RT: Float value representing the time taken for the response on the given trial. + feedback: Integer value representing the outcome of the given trial (where 'correct' == 1, and 'incorrect' == 0). +parameters: + a: + desc: boundary separation + info: [0, 1.8, 'Inf'] + tau: + desc: non-decision time + info: [0, 0.3, 'Inf'] + v: + desc: drift rate scaling + info: ['-Inf', 4.5, 'Inf'] + alpha: + desc: learning rate + info: [0, 0.02, 1] +regressors: + Q1: 2 # shape: [N, T] + Q2: 2 # shape: [N, T] +postpreds: +- choice_os +- RT_os +- choice_sm +- RT_sm +- fd_sm +additional_args: +- code: RTbound + default: 0.1 + desc: Floating point value representing the lower bound (i.e., minimum allowed) + reaction time. Defaults to 0.1 (100 milliseconds). +- code: initQ + default: 0.5 + desc: 'Floating point value representing the model''s initial value of any choice.' 
\ No newline at end of file diff --git a/commons/models/pstRT_rlddm6.yml b/commons/models/pstRT_rlddm6.yml new file mode 100644 index 00000000..2fc52199 --- /dev/null +++ b/commons/models/pstRT_rlddm6.yml @@ -0,0 +1,58 @@ +task_name: + code: pstRT + desc: Probabilistic Selection Task (with RT data) + cite: + - 'Frank, M. J., Santamaria, A., O''Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599.' + - 'Frank, M. J., Seeberger, L. C., & O''reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943.' +model_name: + code: rlddm6 + desc: Reinforcement Learning Drift Diffusion Model 6 + cite: + - Pedersen, M. L., Frank, M. J., & Biele, G. (2017). The drift diffusion model as the choice rule in reinforcement learning. Psychonomic bulletin & review, 24(4), 1234-1251. +model_type: + code: '' + desc: Hierarchical +data_columns: + subjID: A unique identifier for each subject in the data-set. # Required + iter: Integer value representing the trial number for each task condition. + cond: Integer value representing the task condition of the given trial (AB == 1, CD == 2, EF == 3). + prob: 'Float value representing the probability that a correct response (1) is rewarded in the current task condition.' + choice: Integer value representing the option chosen on the given trial (1 or 2). + RT: Float value representing the time taken for the response on the given trial. + feedback: Integer value representing the outcome of the given trial (where 'correct' == 1, and 'incorrect' == 0). 
+parameters: + a: + desc: boundary separation + info: [0, 1.6, 'Inf'] + bp: + desc: boundary separation power + info: [-0.3, 0.02, 0.3] + tau: + desc: non-decision time + info: [0, 0.2, 'Inf'] + v: + desc: drift rate scaling + info: ['-Inf', 2.8, 'Inf'] + alpha_pos: + desc: learning rate for positive prediction error + info: [0, 0.04, 1] + alpha_neg: + desc: learning rate for negative prediction error + info: [0, 0.02, 1] +regressors: + Q1: 2 # shape: [N, T] + Q2: 2 # shape: [N, T] +postpreds: +- choice_os +- RT_os +- choice_sm +- RT_sm +- fd_sm +additional_args: +- code: RTbound + default: 0.1 + desc: Floating point value representing the lower bound (i.e., minimum allowed) + reaction time. Defaults to 0.1 (100 milliseconds). +- code: initQ + default: 0.5 + desc: 'Floating point value representing the model''s initial value of any choice.' \ No newline at end of file diff --git a/commons/stan_files/pstRT_ddm.stan b/commons/stan_files/pstRT_ddm.stan new file mode 100644 index 00000000..6ac7f390 --- /dev/null +++ b/commons/stan_files/pstRT_ddm.stan @@ -0,0 +1,203 @@ +// DDM from Pedersen, Frank & Biele (2017) https://doi.org/10.3758/s13423-016-1199-y + +functions{ + // Random number generator from Shahar et al. 
(2019) https://doi.org/10.1371/journal.pcbi.1006803 + vector wiener_rng(real a, real tau, real z, real d) { + real dt; + real sigma; + real p; + real y; + real i; + real aa; + real ch; + real rt; + vector[2] ret; + + dt = .0001; + sigma = 1; + + y = z * a; // starting point + p = .5 * (1 + ((d * sqrt(dt)) / sigma)); + i = 0; + while (y < a && y > 0){ + aa = uniform_rng(0,1); + if (aa <= p){ + y = y + sigma * sqrt(dt); + i = i + 1; + } else { + y = y - sigma * sqrt(dt); + i = i + 1; + } + } + ch = (y <= 0) * 1 + 1; // Upper boundary choice -> 1, lower boundary choice -> 2 + rt = i * dt + tau; + + ret[1] = ch; + ret[2] = rt; + return ret; + } +} + +data { + int N; // Number of subjects + int T; // Maximum number of trials + int Tsubj[N]; // Number of trials for each subject + int n_cond; // Number of task conditions + int cond[N, T]; // Task condition (NA: -1) + int choice[N, T]; // Response (NA: -1) + real RT[N, T]; // Response time + real minRT[N]; // Minimum RT for each subject of the observed data + real RTbound; // Lower bound or RT across all subjects (e.g., 0.1 second) + real prob[n_cond]; // Reward probability for each task condition (for posterior predictive check) +} + +transformed data { +} + +parameters { + // Group-level raw parameters + vector[5] mu_pr; + vector[5] sigma; + + // Subject-level raw parameters (for Matt trick) + vector[N] a_pr; // Boundary separation + vector[N] tau_pr; // Non-decision time + vector[N] d1_pr; // Drift rate 1 + vector[N] d2_pr; // Drift rate 2 + vector[N] d3_pr; // Drift rate 3 (Assumes n_cond = 3) +} + +transformed parameters { + // Transform subject-level raw parameters + vector[N] a; + vector[N] tau; + vector[N] d1; + vector[N] d2; + vector[N] d3; + + for (i in 1:N) { + a[i] = exp(mu_pr[1] + sigma[1] * a_pr[i]); + tau[i] = Phi_approx(mu_pr[2] + sigma[2] * tau_pr[i]) * (minRT[i] - RTbound) + RTbound; + } + d1 = mu_pr[3] + sigma[3] * d1_pr; + d2 = mu_pr[4] + sigma[4] * d2_pr; + d3 = mu_pr[5] + sigma[5] * d3_pr; +} + +model 
{ + // Group-level raw parameters + mu_pr ~ normal(0, 1); + sigma ~ normal(0, 0.2); + + // Individual parameters + a_pr ~ normal(0, 1); + tau_pr ~ normal(0, 1); + d1_pr ~ normal(0, 1); + d2_pr ~ normal(0, 1); + d3_pr ~ normal(0, 1); + + // Subject loop + for (i in 1:N) { + // Declare variables + int r; + int s; + real d; + + // Drift rates + vector[3] d_vec; // Assumes n_cond = 3 + d_vec[1] = d1[i]; + d_vec[2] = d2[i]; + d_vec[3] = d3[i]; + + // Trial loop + for (t in 1:Tsubj[i]) { + // Save values to variables + s = cond[i, t]; + r = choice[i, t]; + + // Drift diffusion process + d = d_vec[s]; // Drift rate, Q[s, 1]: upper boundary option, Q[s, 2]: lower boundary option + if (r == 1) { + RT[i, t] ~ wiener(a[i], tau[i], 0.5, d); + } else { + RT[i, t] ~ wiener(a[i], tau[i], 0.5, -d); + } + } + } +} + +generated quantities { + // For group level parameters + real mu_a; + real mu_tau; + real mu_d1; + real mu_d2; + real mu_d3; + + // For log likelihood + real log_lik[N]; + + // For posterior predictive check (one-step method) + matrix[N, T] choice_os; + matrix[N, T] RT_os; + vector[2] tmp_os; + + // Assign group-level parameter values + mu_a = exp(mu_pr[1]); + mu_tau = Phi_approx(mu_pr[2]) * (mean(minRT) - RTbound) + RTbound; + mu_d1 = mu_pr[3]; + mu_d2 = mu_pr[4]; + mu_d3 = mu_pr[5]; + + // Set all posterior predictions to -1 (avoids NULL values) + for (i in 1:N) { + for (t in 1:T) { + choice_os[i, t] = -1; + RT_os[i, t] = -1; + } + } + + { // local section, this saves time and space + // Subject loop + for (i in 1:N) { + // Declare variables + int r; + int r_sm; + int s; + real d; + real d_sm; + + // Drift rates + vector[3] d_vec; // Assumes n_cond = 3 + d_vec[1] = d1[i]; + d_vec[2] = d2[i]; + d_vec[3] = d3[i]; + + // Initialized log likelihood + log_lik[i] = 0; + + // Trial loop + for (t in 1:Tsubj[i]) { + // Save values to variables + s = cond[i, t]; + r = choice[i, t]; + + //////////// Posterior predictive check (one-step method) //////////// + + // Calculate 
Drift rate + d = d_vec[s]; // Q[s, 1]: upper boundary option, Q[s, 2]: lower boundary option + + // Drift diffusion process + if (r == 1) { + log_lik[i] += wiener_lpdf(RT[i, t] | a[i], tau[i], 0.5, d); + } else { + log_lik[i] += wiener_lpdf(RT[i, t] | a[i], tau[i], 0.5, -d); + } + + tmp_os = wiener_rng(a[i], tau[i], 0.5, d); + choice_os[i, t] = tmp_os[1]; + RT_os[i, t] = tmp_os[2]; + } + } + } +} diff --git a/commons/stan_files/pstRT_rlddm1.stan b/commons/stan_files/pstRT_rlddm1.stan new file mode 100644 index 00000000..021bbf2a --- /dev/null +++ b/commons/stan_files/pstRT_rlddm1.stan @@ -0,0 +1,245 @@ +// Model 6 from Pedersen, Frank & Biele (2017) https://doi.org/10.3758/s13423-016-1199-y + +functions{ + // Random number generator from Shahar et al. (2019) https://doi.org/10.1371/journal.pcbi.1006803 + vector wiener_rng(real a, real tau, real z, real d) { + real dt; + real sigma; + real p; + real y; + real i; + real aa; + real ch; + real rt; + vector[2] ret; + + dt = .0001; + sigma = 1; + + y = z * a; // starting point + p = .5 * (1 + ((d * sqrt(dt)) / sigma)); + i = 0; + while (y < a && y > 0){ + aa = uniform_rng(0,1); + if (aa <= p){ + y = y + sigma * sqrt(dt); + i = i + 1; + } else { + y = y - sigma * sqrt(dt); + i = i + 1; + } + } + ch = (y <= 0) * 1 + 1; // Upper boundary choice -> 1, lower boundary choice -> 2 + rt = i * dt + tau; + + ret[1] = ch; + ret[2] = rt; + return ret; + } +} + +data { + int N; // Number of subjects + int T; // Maximum number of trials + int Tsubj[N]; // Number of trials for each subject + int n_cond; // Number of task conditions + int cond[N, T]; // Task condition (NA: -1) + int choice[N, T]; // Response (NA: -1) + real RT[N, T]; // Response time + real fd[N, T]; // Feedback + real initQ; // Initial Q value + real minRT[N]; // Minimum RT for each subject of the observed data + real RTbound; // Lower bound or RT across all subjects (e.g., 0.1 second) + real prob[n_cond]; // Reward probability for each task condition (for posterior 
predictive check) +} + +transformed data { +} + +parameters { + // Group-level raw parameters + vector[4] mu_pr; + vector[4] sigma; + + // Subject-level raw parameters (for Matt trick) + vector[N] a_pr; // Boundary separation + vector[N] tau_pr; // Non-decision time + vector[N] v_pr; // Drift rate scaling + vector[N] alpha_pr; // Learning rate +} + +transformed parameters { + // Transform subject-level raw parameters + vector[N] a; + vector[N] tau; + vector[N] v; + vector[N] alpha; + + for (i in 1:N) { + a[i] = exp(mu_pr[1] + sigma[1] * a_pr[i]); + tau[i] = Phi_approx(mu_pr[2] + sigma[2] * tau_pr[i]) * (minRT[i] - RTbound) + RTbound; + alpha[i] = Phi_approx(mu_pr[4] + sigma[4] * alpha_pr[i]); + } + v = mu_pr[3] + sigma[3] * v_pr; +} + +model { + // Group-level raw parameters + mu_pr ~ normal(0, 1); + sigma ~ normal(0, 0.2); + + // Individual parameters + a_pr ~ normal(0, 1); + tau_pr ~ normal(0, 1); + v_pr ~ normal(0, 1); + alpha_pr ~ normal(0, 1); + + // Subject loop + for (i in 1:N) { + // Declare variables + int r; + int s; + real d; + + // Initialize Q-values + matrix[n_cond, 2] Q; + Q = rep_matrix(initQ, n_cond, 2); + + // Trial loop + for (t in 1:Tsubj[i]) { + // Save values to variables + s = cond[i, t]; + r = choice[i, t]; + + // Drift diffusion process + d = (Q[s, 1] - Q[s, 2]) * v[i]; // Drift rate, Q[s, 1]: upper boundary option, Q[s, 2]: lower boundary option + if (r == 1) { + RT[i, t] ~ wiener(a[i], tau[i], 0.5, d); + } else { + RT[i, t] ~ wiener(a[i], tau[i], 0.5, -d); + } + + // Update Q-value + Q[s, r] += alpha[i] * (fd[i, t] - Q[s, r]); + } + } +} + +generated quantities { + // For group level parameters + real mu_a; + real mu_tau; + real mu_v; + real mu_alpha; + + // For log likelihood + real log_lik[N]; + + // For model regressors + matrix[N, T] Q1; + matrix[N, T] Q2; + + // For posterior predictive check (one-step method) + matrix[N, T] choice_os; + matrix[N, T] RT_os; + vector[2] tmp_os; + + // For posterior predictive check (simulation method) 
+ matrix[N, T] choice_sm; + matrix[N, T] RT_sm; + matrix[N, T] fd_sm; + vector[2] tmp_sm; + real rand; + + // Assign group-level parameter values + mu_a = exp(mu_pr[1]); + mu_tau = Phi_approx(mu_pr[2]) * (mean(minRT) - RTbound) + RTbound; + mu_v = mu_pr[3]; + mu_alpha = Phi_approx(mu_pr[4]); + + // Set all posterior predictions to -1 (avoids NULL values) + for (i in 1:N) { + for (t in 1:T) { + Q1[i, t] = -1; + Q2[i, t] = -1; + choice_os[i, t] = -1; + RT_os[i, t] = -1; + choice_sm[i, t] = -1; + RT_sm[i, t] = -1; + fd_sm[i, t] = -1; + } + } + + { // local section, this saves time and space + // Subject loop + for (i in 1:N) { + // Declare variables + int r; + int r_sm; + int s; + real d; + real d_sm; + + // Initialize Q-values + matrix[n_cond, 2] Q; + matrix[n_cond, 2] Q_sm; + Q = rep_matrix(initQ, n_cond, 2); + Q_sm = rep_matrix(initQ, n_cond, 2); + + // Initialized log likelihood + log_lik[i] = 0; + + // Trial loop + for (t in 1:Tsubj[i]) { + // Save values to variables + s = cond[i, t]; + r = choice[i, t]; + + //////////// Posterior predictive check (one-step method) //////////// + + // Calculate Drift rate + d = (Q[s, 1] - Q[s, 2]) * v[i]; // Q[s, 1]: upper boundary option, Q[s, 2]: lower boundary option + + // Drift diffusion process + if (r == 1) { + log_lik[i] += wiener_lpdf(RT[i, t] | a[i], tau[i], 0.5, d); + } else { + log_lik[i] += wiener_lpdf(RT[i, t] | a[i], tau[i], 0.5, -d); + } + + tmp_os = wiener_rng(a[i], tau[i], 0.5, d); + choice_os[i, t] = tmp_os[1]; + RT_os[i, t] = tmp_os[2]; + + // Model regressors --> store values before being updated + Q1[i, t] = Q[s, 1]; + Q2[i, t] = Q[s, 2]; + + // Update Q-value + Q[s, r] += alpha[i] * (fd[i, t] - Q[s, r]); + + //////////// Posterior predictive check (simulation method) //////////// + + // Calculate Drift rate + d_sm = (Q_sm[s, 1] - Q_sm[s, 2]) * v[i]; // Q[s, 1]: upper boundary option, Q[s, 2]: lower boundary option + + // Drift diffusion process + tmp_sm = wiener_rng(a[i], tau[i], 0.5, d_sm); + choice_sm[i, 
t] = tmp_sm[1]; + RT_sm[i, t] = tmp_sm[2]; + + // Determine feedback + rand = uniform_rng(0, 1); + if (choice_sm[i, t] == 1) { + fd_sm[i, t] = rand <= prob[s]; // Upper boundary choice (correct) + } else { + fd_sm[i, t] = rand > prob[s]; // Lower boundary choice (incorrect) + } + + // Update Q-value + r_sm = (choice_sm[i, t] == 2) + 1; // 'real' to 'int' conversion. 1 -> 1, 2 -> 2 + Q_sm[s, r_sm] += alpha[i] * (fd_sm[i, t] - Q_sm[s, r_sm]); + } + } + } +} diff --git a/commons/stan_files/pstRT_rlddm6.stan b/commons/stan_files/pstRT_rlddm6.stan new file mode 100644 index 00000000..56d0eed7 --- /dev/null +++ b/commons/stan_files/pstRT_rlddm6.stan @@ -0,0 +1,281 @@ +// Model 6 from Pedersen, Frank & Biele (2017) https://doi.org/10.3758/s13423-016-1199-y + +functions{ + // Random number generator from Shahar et al. (2019) https://doi.org/10.1371/journal.pcbi.1006803 + vector wiener_rng(real a, real tau, real z, real d) { + real dt; + real sigma; + real p; + real y; + real i; + real aa; + real ch; + real rt; + vector[2] ret; + + dt = .0001; + sigma = 1; + + y = z * a; // starting point + p = .5 * (1 + ((d * sqrt(dt)) / sigma)); + i = 0; + while (y < a && y > 0){ + aa = uniform_rng(0,1); + if (aa <= p){ + y = y + sigma * sqrt(dt); + i = i + 1; + } else { + y = y - sigma * sqrt(dt); + i = i + 1; + } + } + ch = (y <= 0) * 1 + 1; // Upper boundary choice -> 1, lower boundary choice -> 2 + rt = i * dt + tau; + + ret[1] = ch; + ret[2] = rt; + return ret; + } +} + +data { + int N; // Number of subjects + int T; // Maximum number of trials + int Tsubj[N]; // Number of trials for each subject + int Isubj[N, T]; // Trial number for each task condition + int n_cond; // Number of task conditions + int cond[N, T]; // Task condition (NA: -1) + int choice[N, T]; // Response (NA: -1) + real RT[N, T]; // Response time + real fd[N, T]; // Feedback + real initQ; // Initial Q value + real minRT[N]; // Minimum RT for each subject of the observed data + real RTbound; // Lower bound or RT 
across all subjects (e.g., 0.1 second) + real prob[n_cond]; // Reward probability for each task condition (for posterior predictive check) +} + +transformed data { +} + +parameters { + // Group-level raw parameters + vector[6] mu_pr; + vector[6] sigma; + + // Subject-level raw parameters (for Matt trick) + vector[N] a_pr; // Boundary separation + vector[N] bp_pr; // Boundary separation power + vector[N] tau_pr; // Non-decision time + vector[N] v_pr; // Drift rate scaling + vector[N] alpha_pos_pr; // Learning rate for positive prediction error + vector[N] alpha_neg_pr; // Learning rate for negative prediction error +} + +transformed parameters { + // Transform subject-level raw parameters + vector[N] a; + vector[N] bp; + vector[N] tau; + vector[N] v; + vector[N] alpha_pos; + vector[N] alpha_neg; + + for (i in 1:N) { + a[i] = exp(mu_pr[1] + sigma[1] * a_pr[i]); + bp[i] = Phi_approx(mu_pr[2] + sigma[2] * bp_pr[i]) * 0.6 - 0.3; + tau[i] = Phi_approx(mu_pr[3] + sigma[3] * tau_pr[i]) * (minRT[i] - RTbound) + RTbound; + alpha_pos[i] = Phi_approx(mu_pr[5] + sigma[5] * alpha_pos_pr[i]); + alpha_neg[i] = Phi_approx(mu_pr[6] + sigma[6] * alpha_neg_pr[i]); + } + v = mu_pr[4] + sigma[4] * v_pr; +} + +model { + // Group-level raw parameters + mu_pr ~ normal(0, 1); + sigma ~ normal(0, 0.2); + + // Individual parameters + a_pr ~ normal(0, 1); + bp_pr ~ normal(0, 1); + tau_pr ~ normal(0, 1); + v_pr ~ normal(0, 1); + alpha_pos_pr ~ normal(0, 1); + alpha_neg_pr ~ normal(0, 1); + + // Subject loop + for (i in 1:N) { + // Declare variables + int r; + int s; + real d; + real PE; + + // Initialize Q-values + matrix[n_cond, 2] Q; + Q = rep_matrix(initQ, n_cond, 2); + + // Trial loop + for (t in 1:Tsubj[i]) { + // Save values to variables + s = cond[i, t]; + r = choice[i, t]; + + // Drift diffusion process + d = (Q[s, 1] - Q[s, 2]) * v[i]; // Drift rate, Q[s, 1]: upper boundary option, Q[s, 2]: lower boundary option + if (r == 1) { + RT[i, t] ~ wiener(a[i]*(Isubj[i, t]/10.0)^bp[i], tau[i], 
0.5, d); + } else { + RT[i, t] ~ wiener(a[i]*(Isubj[i, t]/10.0)^bp[i], tau[i], 0.5, -d); + } + // + // Update Q-value based on the valence of PE + PE = fd[i, t] - Q[s, r]; + + if (PE > 0) { + Q[s, r] += alpha_pos[i] * PE; + } + else { + Q[s, r] += alpha_neg[i] * PE; + } + } + } +} + +generated quantities { + // For group level parameters + real mu_a; + real mu_bp; + real mu_tau; + real mu_v; + real mu_alpha_pos; + real mu_alpha_neg; + + // For log likelihood + real log_lik[N]; + + // For model regressors + matrix[N, T] Q1; + matrix[N, T] Q2; + + // For posterior predictive check (one-step method) + matrix[N, T] choice_os; + matrix[N, T] RT_os; + vector[2] tmp_os; + + // For posterior predictive check (simulation method) + matrix[N, T] choice_sm; + matrix[N, T] RT_sm; + matrix[N, T] fd_sm; + vector[2] tmp_sm; + real rand; + + // Assign group-level parameter values + mu_a = exp(mu_pr[1]); + mu_bp = Phi_approx(mu_pr[2]) * 0.6 - 0.3; + mu_tau = Phi_approx(mu_pr[3]) * (mean(minRT) - RTbound) + RTbound; + mu_v = mu_pr[4]; + mu_alpha_pos = Phi_approx(mu_pr[5]); + mu_alpha_neg = Phi_approx(mu_pr[6]); + + // Set all posterior predictions to -1 (avoids NULL values) + for (i in 1:N) { + for (t in 1:T) { + Q1[i, t] = -1; + Q2[i, t] = -1; + choice_os[i, t] = -1; + RT_os[i, t] = -1; + choice_sm[i, t] = -1; + RT_sm[i, t] = -1; + fd_sm[i, t] = -1; + } + } + + { // local section, this saves time and space + // Subject loop + for (i in 1:N) { + // Declare variables + int r; + int r_sm; + int s; + real d; + real d_sm; + real PE; + real PE_sm; + + // Initialize Q-values + matrix[n_cond, 2] Q; + matrix[n_cond, 2] Q_sm; + Q = rep_matrix(initQ, n_cond, 2); + Q_sm = rep_matrix(initQ, n_cond, 2); + + // Initialized log likelihood + log_lik[i] = 0; + + // Trial loop + for (t in 1:Tsubj[i]) { + // Save values to variables + s = cond[i, t]; + r = choice[i, t]; + + //////////// Posterior predictive check (one-step method) //////////// + + // Calculate Drift rate + d = (Q[s, 1] - Q[s, 2]) * 
v[i]; // Q[s, 1]: upper boundary option, Q[s, 2]: lower boundary option + + // Drift diffusion process + if (r == 1) { + log_lik[i] += wiener_lpdf(RT[i, t] | a[i]*(Isubj[i, t]/10.0)^bp[i], tau[i], 0.5, d); + } else { + log_lik[i] += wiener_lpdf(RT[i, t] | a[i]*(Isubj[i, t]/10.0)^bp[i], tau[i], 0.5, -d); + } + + tmp_os = wiener_rng(a[i], tau[i], 0.5, d); + choice_os[i, t] = tmp_os[1]; + RT_os[i, t] = tmp_os[2]; + + // Model regressors --> store values before being updated + Q1[i, t] = Q[s, 1]; + Q2[i, t] = Q[s, 2]; + + // Update Q-value + PE = fd[i, t] - Q[s, r]; + + if (PE > 0) { + Q[s, r] += alpha_pos[i] * PE; + } else { + Q[s, r] += alpha_neg[i] * PE; + } + + //////////// Posterior predictive check (simulation method) //////////// + + // Calculate Drift rate + d_sm = (Q_sm[s, 1] - Q_sm[s, 2]) * v[i]; // Q[s, 1]: upper boundary option, Q[s, 2]: lower boundary option + + // Drift diffusion process + tmp_sm = wiener_rng(a[i]*(Isubj[i, t]/10.0)^bp[i], tau[i], 0.5, d_sm); + choice_sm[i, t] = tmp_sm[1]; + RT_sm[i, t] = tmp_sm[2]; + + // Determine feedback + rand = uniform_rng(0, 1); + if (choice_sm[i, t] == 1) { + fd_sm[i, t] = rand <= prob[s]; // Upper boundary choice (correct) + } else { + fd_sm[i, t] = rand > prob[s]; // Lower boundary choice (incorrect) + } + + // Update Q-value + r_sm = (choice_sm[i, t] == 2) + 1; // 'real' to 'int' conversion. 
1 -> 1, 2 -> 2 + PE_sm = fd_sm[i, t] - Q_sm[s, r_sm]; + + if (PE_sm > 0) { + Q_sm[s, r_sm] += alpha_pos[i] * PE_sm; + } + else { + Q_sm[s, r_sm] += alpha_neg[i] * PE_sm; + } + } + } + } +} From 6ac54171282ed1268fa25105dfe040852437291a Mon Sep 17 00:00:00 2001 From: Hoyoung Doh Date: Mon, 1 Aug 2022 17:15:12 +0900 Subject: [PATCH 2/2] Remove trailing spaces --- commons/models/pstRT_rlddm1.yml | 10 +++++----- commons/models/pstRT_rlddm6.yml | 12 ++++++------ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/commons/models/pstRT_rlddm1.yml b/commons/models/pstRT_rlddm1.yml index bddb1171..5109abe9 100644 --- a/commons/models/pstRT_rlddm1.yml +++ b/commons/models/pstRT_rlddm1.yml @@ -1,8 +1,8 @@ task_name: code: pstRT desc: Probabilistic Selection Task (with RT data) - cite: - - 'Frank, M. J., Santamaria, A., O''Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599.' + cite: + - 'Frank, M. J., Santamaria, A., O''Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599.' - 'Frank, M. J., Seeberger, L. C., & O''reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943.' model_name: code: rlddm1 @@ -28,13 +28,13 @@ parameters: info: [0, 0.3, 'Inf'] v: desc: drift rate scaling - info: ['-Inf', 4.5, 'Inf'] + info: ['-Inf', 4.5, 'Inf'] alpha: desc: learning rate info: [0, 0.02, 1] regressors: Q1: 2 # shape: [N, T] - Q2: 2 # shape: [N, T] + Q2: 2 # shape: [N, T] postpreds: - choice_os - RT_os @@ -48,4 +48,4 @@ additional_args: reaction time. Defaults to 0.1 (100 milliseconds). - code: initQ default: 0.5 - desc: 'Floating point value representing the model''s initial value of any choice.' 
\ No newline at end of file + desc: 'Floating point value representing the model''s initial value of any choice.' diff --git a/commons/models/pstRT_rlddm6.yml b/commons/models/pstRT_rlddm6.yml index 2fc52199..6a77a560 100644 --- a/commons/models/pstRT_rlddm6.yml +++ b/commons/models/pstRT_rlddm6.yml @@ -1,8 +1,8 @@ task_name: code: pstRT desc: Probabilistic Selection Task (with RT data) - cite: - - 'Frank, M. J., Santamaria, A., O''Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599.' + cite: + - 'Frank, M. J., Santamaria, A., O''Reilly, R. C., & Willcutt, E. (2007). Testing computational models of dopamine and noradrenaline dysfunction in attention deficit/hyperactivity disorder. Neuropsychopharmacology, 32(7), 1583-1599.' - 'Frank, M. J., Seeberger, L. C., & O''reilly, R. C. (2004). By carrot or by stick: cognitive reinforcement learning in parkinsonism. Science, 306(5703), 1940-1943.' model_name: code: rlddm6 @@ -26,13 +26,13 @@ parameters: info: [0, 1.6, 'Inf'] bp: desc: boundary separation power - info: [-0.3, 0.02, 0.3] + info: [-0.3, 0.02, 0.3] tau: desc: non-decision time info: [0, 0.2, 'Inf'] v: desc: drift rate scaling - info: ['-Inf', 2.8, 'Inf'] + info: ['-Inf', 2.8, 'Inf'] alpha_pos: desc: learning rate for positive prediction error info: [0, 0.04, 1] @@ -41,7 +41,7 @@ parameters: info: [0, 0.02, 1] regressors: Q1: 2 # shape: [N, T] - Q2: 2 # shape: [N, T] + Q2: 2 # shape: [N, T] postpreds: - choice_os - RT_os @@ -55,4 +55,4 @@ additional_args: reaction time. Defaults to 0.1 (100 milliseconds). - code: initQ default: 0.5 - desc: 'Floating point value representing the model''s initial value of any choice.' \ No newline at end of file + desc: 'Floating point value representing the model''s initial value of any choice.'