Remove references to linearComb and backTransform (#32)
In vignettes and man pages
kenkellner authored Dec 1, 2024
1 parent d6c2f1a commit e5d80cd
Showing 25 changed files with 102 additions and 102 deletions.
4 changes: 2 additions & 2 deletions DESCRIPTION
@@ -1,6 +1,6 @@
Package: unmarked
Version: 1.4.3.9005
Date: 2024-10-25
Version: 1.4.3.9006
Date: 2024-12-01
Type: Package
Title: Models for Data from Unmarked Animals
Authors@R: c(
4 changes: 4 additions & 0 deletions R/predict.R
@@ -130,6 +130,10 @@ setMethod("predict_inputs_from_umf", "unmarkedFit",
designMats <- getDesign(newdata, object@formula, na.rm = na.rm)
if(type == "state") list_els <- c("X","Z_state","X.offset")
if(type == "det") list_els <- c("V","Z_det","V.offset")
if(type == "scale"){ # no covariates
n <- nrow(designMats$V)
return(list(X = matrix(1, nrow=n, ncol=1), offset = rep(0, n)))
}

X <- designMats[[list_els[1]]]
if(is.null(re.form)) X <- cbind(X, designMats[[list_els[2]]])
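This hunk builds an intercept-only design matrix for the "scale" parameter (the extra parameter of the hazard-rate detection function in `distsamp`, which never has covariates). As a hedged sketch of how the new branch would be exercised — `fm.haz` here stands for a hypothetical hazard-rate `distsamp` fit, not defined in this diff:

``` r
# Sketch only: assumes fm.haz was fit with distsamp(..., keyfun = "hazard"),
# so the model has an intercept-only "scale" parameter.
predict(fm.haz, type = "scale")[1, ]  # same Predicted/SE/CI repeated for every site
```

The test added further down in `tests/testthat/test_distsamp.R` checks exactly this path.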
5 changes: 3 additions & 2 deletions man/distsamp.Rd
@@ -107,9 +107,10 @@ hist(ltUMF)

# Some methods to use on fitted model
summary(fm1)
backTransform(fm1, type="state") # animals / ha

predict(fm1, type = "state")[1,] # animals / ha
exp(coef(fm1, type="state", altNames=TRUE)) # same
backTransform(fm1, type="det") # half-normal SD
predict(fm1, type = "det")[1,] # half-normal SD
hist(fm1, xlab="Distance (m)") # Only works when there are no det covars
# Empirical Bayes estimates of posterior distribution for N_i
plot(ranef(fm1, K=50))
7 changes: 3 additions & 4 deletions man/gdistsamp.Rd
@@ -199,10 +199,9 @@ m1 <- gdistsamp(~1, ~1, ~1, umf, output="density", K=50)

summary(m1)


backTransform(m1, type="lambda")
backTransform(m1, type="phi")
backTransform(m1, type="det")
predict(m1, type = 'lambda')[1,]
predict(m1, type = 'phi')[1,]
predict(m1, type = 'det')[1,]

\dontrun{
# Empirical Bayes estimates of abundance at each site
8 changes: 4 additions & 4 deletions man/gmultmix.Rd
@@ -149,12 +149,12 @@ umf1 <- unmarkedFrameGMM(y=y.ijt, numPrimary=T, type="removal")

(m1 <- gmultmix(~1, ~1, ~1, data=umf1, K=30))

backTransform(m1, type="lambda") # Individuals per plot
backTransform(m1, type="phi") # Probability of being available
(p <- backTransform(m1, type="det")) # Probability of detection
p <- coef(p)
predict(m1, type="lambda")[1,] # Individuals per plot
predict(m1, type="phi")[1,] # Probability of being available
(p <- predict(m1, type="det")[1,]) # Probability of detection

# Multinomial cell probabilities under removal design
p <- p$Predicted
c(p, (1-p) * p, (1-p)^2 * p)

# Or more generally:
6 changes: 3 additions & 3 deletions man/gpcount.Rd
@@ -154,9 +154,9 @@ umf <- unmarkedFrameGPC(y=ym, numPrimary=nVisits)
\dontrun{
fmu <- gpcount(~1, ~1, ~1, umf, K=40, control=list(trace=TRUE, REPORT=1))

backTransform(fmu, type="lambda")
backTransform(fmu, type="phi")
backTransform(fmu, type="det")
predict(fmu, type="lambda")[1,]
predict(fmu, type="phi")[1,]
predict(fmu, type="det")[1,]
}

}
2 changes: 1 addition & 1 deletion man/multinomPois.Rd
@@ -90,7 +90,7 @@ ovenFrame <- unmarkedFrameMPois(ovendata.list$data,
(fm1 <- multinomPois(~ 1 ~ ufc + trba, ovenFrame))

# Detection probability for a single pass
backTransform(fm1, type="det")
predict(fm1, type="det")[1,]

# Detection probability after 4 removal passes
rowSums(getP(fm1))
9 changes: 3 additions & 6 deletions man/occu.Rd
@@ -110,12 +110,9 @@ obsCovs(pferUMF) <- data.frame(obsvar1 = rnorm(numSites(pferUMF) * obsNum(pferUM
confint(fm, type='det', method = 'normal')
confint(fm, type='det', method = 'profile')

# estimate detection effect at obsvars=0.5
(lc <- linearComb(fm['det'],c(1,0.5)))

# transform this to probability (0 to 1) scale and get confidence limits
(btlc <- backTransform(lc))
confint(btlc, level = 0.9)
# estimate detection probability and 95% CI at obsvar1 = 0.5
nd <- data.frame(obsvar1 = 0.5)
predict(fm, type = "det", newdata = nd, appendData = TRUE)

# Empirical Bayes estimates of proportion of sites occupied
re <- ranef(fm)
4 changes: 2 additions & 2 deletions man/occuCOP.Rd
@@ -151,7 +151,7 @@ print(fitCov)

# We back-transform the parameter's estimates
## Back-transformed occupancy probability with no covariates
backTransform(fitNull, "psi")
predict(fitNull, "psi")[1,]

## Back-transformed occupancy probability depending on habitat use
predict(fitCov,
@@ -160,7 +160,7 @@ predict(fitCov,
appendData = TRUE)

## Back-transformed detection rate with no covariates
backTransform(fitNull, "lambda")
predict(fitNull, "lambda")[1,]

## Back-transformed detection rate depending on wind
predict(fitCov,
2 changes: 1 addition & 1 deletion man/pcountOpen.Rd
@@ -213,7 +213,7 @@ summary(umf)
# Fit model and backtransform
(m1 <- pcountOpen(~1, ~1, ~1, ~1, umf, K=20)) # Typically, K should be higher

(lam <- coef(backTransform(m1, "lambda"))) # or
(lam <- predict(m1, "lambda")$Predicted[1]) # or
lam <- exp(coef(m1, type="lambda"))
gam <- exp(coef(m1, type="gamma"))
om <- plogis(coef(m1, type="omega"))
2 changes: 2 additions & 0 deletions tests/testthat/test_distsamp.R
@@ -213,6 +213,8 @@ test_that("distsamp line keyfunctions work",{
D <- backTransform(fm.haz, type="state")
Sh <- backTransform(fm.haz, type="det")
Sc <- backTransform(fm.haz, type="scale")
pr <- predict(fm.haz, type='scale')$Predicted[1] # make sure predict works with scale
expect_equal(Sc@estimate, pr)
expect_equivalent(coef(D), 137.0375, tol=1e-4)
expect_equivalent(SE(D), 16.82505, tol=1e-4)
expect_equivalent(coef(Sh), 15.90262, tol=1e-4)
7 changes: 4 additions & 3 deletions vignettes/cap-recap.Rmd
@@ -349,11 +349,12 @@ lines(upper ~ woody, E.abundance, col=gray(0.7))
```

What about detection probability? Since there was no evidence of
variation in $p$, we can simply back-transform the logit-scale estimate
to obtain $\hat{p}$.
variation in $p$, we can run `predict` on the model with `type = 'det'`
in order to back-transform the logit-scale estimate and obtain $\hat{p}$.
We show just the first row of the output since all sites/occasions have identical $\hat{p}$.

```{r}
backTransform(M0.woody, type="det")
predict(M0.woody, type='det')[1,]
```

As suggested by the raw data, detection probability was very high. The
Binary file removed vignettes/colext-data-1.png
Binary file removed vignettes/colext-est-1.png
Binary file added vignettes/colext-figures/colext-data-1.png
Binary file added vignettes/colext-figures/colext-est-1.png
Binary file added vignettes/colext-figures/colext-gof-1.png
Binary file added vignettes/colext-figures/colext-pred-1.png
Binary file removed vignettes/colext-gof-1.png
Binary file removed vignettes/colext-pred-1.png
58 changes: 19 additions & 39 deletions vignettes/colext.Rmd
@@ -13,7 +13,7 @@ output:
number_sections: true
toc: true
vignette: >
%\VignetteIndexEntry{Dynamic occupancy models}
%\VignetteIndexEntry{Dynamic occupancy models in unmarked}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
@@ -350,7 +350,7 @@ legend(1, 0.6, c("truth", "observed"),
col=c("black", "blue"), lty=c(1,3), pch=c(16,1))
```

![Figure 1. Summary of the multi-year occupancy data set generated.](colext-data-1.png)
![Figure 1. Summary of the multi-year occupancy data set generated.](colext-figures/colext-data-1.png)

To analyze this data set with a dynamic occupancy model in
`unmarked`, we first load the package.
@@ -460,9 +460,6 @@ summary(m0)
##
## AIC: 4972.597
## Number of sites: 250
## optim convergence code: 0
## optim iterations: 27
## Bootstrap iterations: 0
```

The computation time was only a few seconds.
@@ -479,11 +476,12 @@ plogis(-0.813)
## [1] 0.3072516
```

Alternatively, we can use `backTransform`, which
computes standard errors using the delta method. Confidence intervals
are also easily obtained using the function `confint`.
Alternatively, we can use `predict`, which also computes a 95% confidence interval.
We first remind ourselves of the names of parameters, which can all be
used as arguments for these functions.
By default `predict` returns estimates of occupancy for each site.
We select just the first row of `predict` output because all sites have
the same occupancy estimate.


``` r
@@ -495,25 +493,12 @@ names(m0)
```

``` r
backTransform(m0, type="psi")
predict(m0, type="psi")[1,]
```

```
## Backtransformed linear combination(s) of Initial estimate(s)
##
## Estimate SE LinComb (Intercept)
## 0.307 0.0335 -0.813 1
##
## Transformation: logistic
```

``` r
confint(backTransform(m0, type="psi"))
```

```
## 0.025 0.975
## 0.2457313 0.3765804
## Predicted SE lower upper
## 1 0.3072943 0.03352795 0.2457313 0.3765804
```

Next, we fit the dynamic occupancy model with full year-dependence in
@@ -548,11 +533,11 @@ m1
## colext(psiformula = ~1, gammaformula = ~year - 1, epsilonformula = ~year -
## 1, pformula = ~year - 1, data = simUMF)
##
## Initial:
## Initial (logit-scale):
## Estimate SE z P(>|z|)
## -0.273 0.302 -0.906 0.365
##
## Colonization:
## Colonization (logit-scale):
## Estimate SE z P(>|z|)
## year01 -2.08 0.951 -2.19 2.86e-02
## year02 -2.18 0.365 -5.96 2.52e-09
@@ -564,7 +549,7 @@ m1
## year08 -1.43 0.228 -6.29 3.19e-10
## year09 -2.35 0.470 -5.00 5.64e-07
##
## Extinction:
## Extinction (logit-scale):
## Estimate SE z P(>|z|)
## year01 -1.4209 0.418 -3.401 6.72e-04
## year02 -0.4808 0.239 -2.009 4.45e-02
@@ -576,7 +561,7 @@ m1
## year08 -1.1894 0.292 -4.076 4.58e-05
## year09 -0.6292 0.635 -0.991 3.22e-01
##
## Detection:
## Detection (logit-scale):
## Estimate SE z P(>|z|)
## year01 -1.0824 0.244 -4.434 9.26e-06
## year02 -0.2232 0.148 -1.508 1.32e-01
Expand All @@ -589,7 +574,8 @@ m1
## year09 0.6052 0.140 4.338 1.44e-05
## year10 -1.1699 0.306 -3.828 1.29e-04
##
## AIC: 4779.172
## AIC: 4779.172
## Number of sites: 250
```

## Manipulating results: prediction and plotting
Expand All @@ -598,9 +584,7 @@ Again, all estimates are shown on the logit-scale. Back-transforming
estimates when covariates, such as year, are present involves an
extra step. Specifically, we need to tell `unmarked` the values
of our covariate
at which we want an estimate. This can be done using
`backTransform` in combination with `linearComb`, although
it can be easier to use `predict`. `predict` allows the user
at which we want an estimate. This is easiest to do with `predict`, which allows the user
to supply a data.frame in which each row represents a combination of
covariate values of interest. Below, we create data.frames called
`nd` with each row representing a year.
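For illustration, a hedged sketch of that workflow (the year levels follow the simulated covariate earlier in the vignette; this is not necessarily the vignette's exact code):

``` r
# Sketch: one row per year interval at which a prediction is wanted.
nd <- data.frame(year = c("01", "02", "03", "04", "05", "06", "07", "08", "09"))
E.ext <- predict(m1, type = "ext", newdata = nd)  # yearly extinction estimates with SEs and CIs
head(E.ext)
```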
Expand Down Expand Up @@ -663,7 +647,7 @@ with(E.det, { # Plot for detection probability: note 10 years
})
```

![Figure 2. Yearly estimates of parameters](colext-est-1.png)
![Figure 2. Yearly estimates of parameters](colext-figures/colext-est-1.png)

``` r
par(op)
@@ -823,7 +807,7 @@ plot(pb.gof, xlab=expression(chi^2), main="", col=gray(0.95),
xlim=c(7300, 7700))
```

![Figure 3. Goodness-of-fit](colext-gof-1.png)
![Figure 3. Goodness-of-fit](colext-figures/colext-gof-1.png)

Figure 3 indicates that, as expected, the constant
parameter model does not fit the data well.
@@ -1067,7 +1051,7 @@ with(E.p, {
})
```

![Figure 4. Covariates](colext-pred-1.png)
![Figure 4. Covariates](colext-figures/colext-pred-1.png)

``` r
par(op)
Expand All @@ -1081,7 +1065,3 @@ initial funding and support for the package. The questions of many
people on the users' list motivated the writing of this document.

# References

```{r, echo=FALSE}
options(rmarkdown.html_vignette.check_title = FALSE)
```
18 changes: 8 additions & 10 deletions vignettes/colext.Rmd.orig
@@ -13,15 +13,15 @@ output:
number_sections: true
toc: true
vignette: >
%\VignetteIndexEntry{Dynamic occupancy models}
%\VignetteIndexEntry{Dynamic occupancy models in unmarked}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---

```{r,echo=FALSE}
options(rmarkdown.html_vignette.check_title = FALSE)
knitr::opts_chunk$set(message=FALSE, warning=FALSE)
knitr::opts_chunk$set(fig.path="")
knitr::opts_chunk$set(fig.path="colext-figures/")
set.seed(456)
```

@@ -416,16 +416,16 @@ inverse-logit function, named `plogis` in R.
plogis(-0.813)
```

Alternatively, we can use `backTransform`, which
computes standard errors using the delta method. Confidence intervals
are also easily obtained using the function `confint`.
Alternatively, we can use `predict`, which also computes a 95% confidence interval.
We first remind ourselves of the names of parameters, which can all be
used as arguments for these functions.
By default `predict` returns estimates of occupancy for each site.
We select just the first row of `predict` output because all sites have
the same occupancy estimate.

```{r}
names(m0)
backTransform(m0, type="psi")
confint(backTransform(m0, type="psi"))
predict(m0, type="psi")[1,]
```

Next, we fit the dynamic occupancy model with full year-dependence in
@@ -459,9 +459,7 @@ Again, all estimates are shown on the logit-scale. Back-transforming
estimates when covariates, such as year, are present involves an
extra step. Specifically, we need to tell `unmarked` the values
of our covariate
at which we want an estimate. This can be done using
`backTransform` in combination with `linearComb`, although
it can be easier to use `predict`. `predict` allows the user
at which we want an estimate. This is easiest to do with `predict`, which allows the user
to supply a data.frame in which each row represents a combination of
covariate values of interest. Below, we create data.frames called
`nd` with each row representing a year.