Aliases: nls
Keywords: nonlinear, regression, models
### ** Examples

## Don't show:
od <- options(digits = 5)
## End(Don't show)
require(graphics)

DNase1 <- subset(DNase, Run == 1)

## using a selfStart model
fm1DNase1 <- nls(density ~ SSlogis(log(conc), Asym, xmid, scal), DNase1)
summary(fm1DNase1)
Formula: density ~ SSlogis(log(conc), Asym, xmid, scal)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym   2.3452     0.0782    30.0  2.2e-13 ***
xmid   1.4831     0.0814    18.2  1.2e-10 ***
scal   1.0415     0.0323    32.3  8.5e-14 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.0192 on 13 degrees of freedom

Number of iterations to convergence: 0 
Achieved convergence tolerance: 8.28e-06
## the coefficients only:
coef(fm1DNase1)
  Asym   xmid   scal 
2.3452 1.4831 1.0415 
## including their SE, etc:
coef(summary(fm1DNase1))
     Estimate Std. Error t value   Pr(>|t|)
Asym   2.3452   0.078154  30.007 2.1655e-13
xmid   1.4831   0.081353  18.230 1.2185e-10
scal   1.0415   0.032271  32.272 8.5069e-14
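## (An editor's sketch, not part of the original example:) approximate
## Wald-type confidence intervals can be assembled from this table;
## profile-based intervals, e.g. via confint() where available, are
## generally preferable for nonlinear models:
cf <- coef(summary(fm1DNase1))
cf[, "Estimate"] +
    outer(cf[, "Std. Error"], qt(c(0.025, 0.975), df.residual(fm1DNase1)))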
## using conditional linearity
fm2DNase1 <- nls(density ~ 1/(1 + exp((xmid - log(conc))/scal)),
                 data = DNase1,
                 start = list(xmid = 0, scal = 1),
                 algorithm = "plinear")
summary(fm2DNase1)
Formula: density ~ 1/(1 + exp((xmid - log(conc))/scal))

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
xmid   1.4831     0.0814    18.2  1.2e-10 ***
scal   1.0415     0.0323    32.3  8.5e-14 ***
.lin   2.3452     0.0782    30.0  2.2e-13 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.0192 on 13 degrees of freedom

Number of iterations to convergence: 5 
Achieved convergence tolerance: 1.03e-06
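## (Editor's note, not in the original example:) the conditionally
## linear parameter is reported as '.lin'; it plays the role of 'Asym'
## in the selfStart fit above:
all.equal(unname(coef(fm2DNase1)[".lin"]),
          unname(coef(fm1DNase1)["Asym"]), tolerance = 1e-4)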
## without conditional linearity
fm3DNase1 <- nls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
                 data = DNase1,
                 start = list(Asym = 3, xmid = 0, scal = 1))
summary(fm3DNase1)
Formula: density ~ Asym/(1 + exp((xmid - log(conc))/scal))

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym   2.3452     0.0782    30.0  2.2e-13 ***
xmid   1.4831     0.0814    18.2  1.2e-10 ***
scal   1.0415     0.0323    32.3  8.5e-14 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.0192 on 13 degrees of freedom

Number of iterations to convergence: 6 
Achieved convergence tolerance: 2.03e-06
## using Port's nl2sol algorithm
fm4DNase1 <- nls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
                 data = DNase1,
                 start = list(Asym = 3, xmid = 0, scal = 1),
                 algorithm = "port")
summary(fm4DNase1)
Formula: density ~ Asym/(1 + exp((xmid - log(conc))/scal))

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym   2.3452     0.0782    30.0  2.2e-13 ***
xmid   1.4831     0.0814    18.2  1.2e-10 ***
scal   1.0415     0.0323    32.3  8.5e-14 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.0192 on 13 degrees of freedom

Algorithm "port", convergence message: relative convergence (4)
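## (An editor's sketch, beyond the original example:) unlike the
## default Gauss-Newton algorithm, "port" also accepts box constraints
## via 'lower' and 'upper'; the bounds here are illustrative only:
fm4DNase1.b <- nls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
                   data = DNase1,
                   start = list(Asym = 3, xmid = 0, scal = 1),
                   lower = c(Asym = 0, xmid = -Inf, scal = 0),
                   algorithm = "port")
coef(fm4DNase1.b) # unchanged: the unconstrained optimum satisfies the bounds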
## weighted nonlinear regression
Treated <- Puromycin[Puromycin$state == "treated", ]
weighted.MM <- function(resp, conc, Vm, K)
{
    ## Purpose: exactly as white book p. 451 -- RHS for nls()
    ##  Weighted version of Michaelis-Menten model
    ## ----------------------------------------------------------
    ## Arguments: 'y', 'x' and the two parameters (see book)
    ## ----------------------------------------------------------
    ## Author: Martin Maechler, Date: 23 Mar 2001

    pred <- (Vm * conc)/(K + conc)
    (resp - pred) / sqrt(pred)
}

Pur.wt <- nls( ~ weighted.MM(rate, conc, Vm, K), data = Treated,
              start = list(Vm = 200, K = 0.1))
summary(Pur.wt)
Formula: 0 ~ weighted.MM(rate, conc, Vm, K)

Parameters:
   Estimate Std. Error t value Pr(>|t|)    
Vm 2.07e+02   9.22e+00   22.42  7.0e-10 ***
K  5.46e-02   7.98e-03    6.84  4.5e-05 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 1.21 on 10 degrees of freedom

Number of iterations to convergence: 5 
Achieved convergence tolerance: 3.86e-06
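## (For comparison; an addition, not in the original example:) the
## unweighted Michaelis-Menten fit via the selfStart model SSmicmen()
## gives similar, but not identical, estimates:
Pur.uw <- nls(rate ~ SSmicmen(conc, Vm, K), data = Treated)
coef(Pur.uw)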
## Passing arguments using a list that can not be coerced to a data.frame
lisTreat <- with(Treated,
                 list(conc1 = conc[1], conc.1 = conc[-1], rate = rate))

weighted.MM1 <- function(resp, conc1, conc.1, Vm, K)
{
    conc <- c(conc1, conc.1)
    pred <- (Vm * conc)/(K + conc)
    (resp - pred) / sqrt(pred)
}
Pur.wt1 <- nls( ~ weighted.MM1(rate, conc1, conc.1, Vm, K),
               data = lisTreat, start = list(Vm = 200, K = 0.1))
stopifnot(all.equal(coef(Pur.wt), coef(Pur.wt1)))

## Chambers and Hastie (1992) Statistical Models in S (p. 537):
## If the value of the right side [of formula] has an attribute called
## 'gradient' this should be a matrix with the number of rows equal
## to the length of the response and one column for each parameter.

weighted.MM.grad <- function(resp, conc1, conc.1, Vm, K)
{
  conc <- c(conc1, conc.1)

  K.conc <- K + conc
  dy.dV <- conc/K.conc
  dy.dK <- -Vm*dy.dV/K.conc
  pred <- Vm*dy.dV
  pred.5 <- sqrt(pred)
  dev <- (resp - pred) / pred.5
  Ddev <- -0.5*(resp+pred)/(pred.5*pred)
  attr(dev, "gradient") <- Ddev * cbind(Vm = dy.dV, K = dy.dK)
  dev
}

Pur.wt.grad <- nls( ~ weighted.MM.grad(rate, conc1, conc.1, Vm, K),
                   data = lisTreat, start = list(Vm = 200, K = 0.1))

rbind(coef(Pur.wt), coef(Pur.wt1), coef(Pur.wt.grad))
         Vm        K
[1,] 206.83 0.054611
[2,] 206.83 0.054611
[3,] 206.83 0.054611
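## (An editor's sketch, beyond the original example:) for the plain,
## unweighted model, deriv() can construct such a 'gradient' attribute
## automatically instead of coding it by hand:
MM.grad <- deriv(~ Vm * conc/(K + conc), c("Vm", "K"),
                 function(conc, Vm, K) NULL)
Pur.grad <- nls(rate ~ MM.grad(conc, Vm, K), data = Treated,
                start = list(Vm = 200, K = 0.1))
coef(Pur.grad) # unweighted, hence different from the weighted fits above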
## In this example, there seems no advantage to providing the gradient.
## In other cases, there might be.

## The two examples below show that you can fit a model to
## artificial data with noise but not to artificial data
## without noise.
x <- 1:10
y <- 2*x + 3                            # perfect fit
## terminates in an error, because convergence cannot be confirmed:
try(nls(y ~ a + b*x, start = list(a = 0.12345, b = 0.54321)))
Error in nls(y ~ a + b * x, start = list(a = 0.12345, b = 0.54321)) : 
  number of iterations exceeded maximum of 50
## adjusting the convergence test by adding 'scaleOffset' to its denominator RSS:
nls(y ~ a + b*x, start = list(a = 0.12345, b = 0.54321),
    control = list(scaleOffset = 1, printEval = TRUE))
  It.   1, fac=           1, eval (no.,total): ( 1,  1): new dev = 1.05935e-12
Nonlinear regression model
  model: y ~ a + b * x
   data: parent.frame()
a b 
3 2 
 residual sum-of-squares: 1.06e-12

Number of iterations to convergence: 1 
Achieved convergence tolerance: 3.64e-07
## Alternatively jittering the "too exact" values, slightly:
set.seed(27)
yeps <- y + rnorm(length(y), sd = 0.01) # added noise
nls(yeps ~ a + b*x, start = list(a = 0.12345, b = 0.54321))
Nonlinear regression model
  model: yeps ~ a + b * x
   data: parent.frame()
a b 
3 2 
 residual sum-of-squares: 0.00135

Number of iterations to convergence: 2 
Achieved convergence tolerance: 8.66e-09
## the nls() internal cheap guess for starting values can be sufficient:
x <- -(1:100)/10
y <- 100 + 10 * exp(x / 2) + rnorm(x)/10
nlmod <- nls(y ~ Const + A * exp(B * x))
Warning in nls(y ~ Const + A * exp(B * x)) :
  No starting values specified for some parameters.
Initializing 'Const', 'A', 'B' to '1.'.
Consider specifying 'start' or using a selfStart model
plot(x, y, main = "nls(*), data, true function and fit, n=100")
curve(100 + 10 * exp(x / 2), col = 4, add = TRUE)
lines(x, predict(nlmod), col = 2)
## Here, requiring close convergence needs more accurate numerical
## differentiation; without it, this typically gives the error
## "step factor .. reduced below 'minFactor' ..":
## IGNORE_RDIFF_BEGIN
try(nlm1 <- update(nlmod, control = list(tol = 1e-7)))
Warning in nls(formula = y ~ Const + A * exp(B * x), algorithm = "default",  :
  No starting values specified for some parameters.
Initializing 'Const', 'A', 'B' to '1.'.
Consider specifying 'start' or using a selfStart model
Error in nls(formula = y ~ Const + A * exp(B * x), algorithm = "default",  : 
  step factor 0.000488281 reduced below 'minFactor' of 0.000976562
o2 <- options(digits = 10) # more accuracy for 'trace'
## central differencing works here typically (PR#18165: not converging on *some*):
ctr2 <- nls.control(nDcentral = TRUE, tol = 8e-8, # <- even smaller than above
   warnOnly = TRUE || # << work around; e.g. needed on some ATLAS-Lapack setups
       (grepl("^aarch64.*linux", R.version$platform) &&
        grepl("^NixOS", osVersion)))
(nlm2 <- update(nlmod, control = ctr2, trace = TRUE)); options(o2)
Warning in nls(formula = y ~ Const + A * exp(B * x), algorithm = "default",  :
  No starting values specified for some parameters.
Initializing 'Const', 'A', 'B' to '1.'.
Consider specifying 'start' or using a selfStart model
1017460.306    (4.15e+02): par = (1 1 1)
758164.7503    (2.34e+02): par = (13.42031396 1.961485 0.05947543745)
269506.3537    (3.23e+02): par = (51.75719817 -13.09155958 0.8428607712)
68969.21891    (1.03e+02): par = (76.0006985 -1.93522675 1.0190858)
633.3672224    (1.29e+00): par = (100.3761515 8.624648408 5.104490252)
151.4400170    (9.39e+00): par = (100.6344391 4.913490999 0.2849209664)
53.08739445    (7.24e+00): par = (100.6830407 6.899303393 0.4637755095)
1.344478582    (5.97e-01): par = (100.0368306 9.897714144 0.5169294926)
0.9908415908   (1.55e-02): par = (100.0300625 9.9144191 0.5023516843)
0.9906046057   (1.84e-05): par = (100.0288724 9.916224018 0.5025207336)
0.9906046054   (9.94e-08): par = (100.028875 9.916228366 0.50252165)
0.9906046054   (5.00e-08): par = (100.028875 9.916228377 0.5025216525)
Nonlinear regression model
  model: y ~ Const + A * exp(B * x)
   data: parent.frame()
      Const           A           B 
100.0288750   9.9162284   0.5025217 
 residual sum-of-squares: 0.9906046

Number of iterations to convergence: 11 
Achieved convergence tolerance: 4.996813e-08
## --> convergence tolerance  4.997e-8 (in 11 iter.)
## IGNORE_RDIFF_END

## The muscle dataset in MASS is from an experiment on muscle
## contraction on 21 animals.  The observed variables are Strip
## (identifier of muscle), Conc (CaCl concentration) and Length
## (resulting length of muscle section).
## IGNORE_RDIFF_BEGIN
if(requireNamespace("MASS", quietly = TRUE)) withAutoprint({
## The non linear model considered is
##       Length = alpha + beta*exp(-Conc/theta) + error
## where theta is constant but alpha and beta may vary with Strip.

with(MASS::muscle, table(Strip)) # 2, 3 or 4 obs per strip

## We first use the plinear algorithm to fit an overall model,
## ignoring that alpha and beta might vary with Strip.
musc.1 <- nls(Length ~ cbind(1, exp(-Conc/th)), MASS::muscle,
              start = list(th = 1), algorithm = "plinear")
summary(musc.1)

## Then we use nls' indexing feature for parameters in non-linear
## models to use the conventional algorithm to fit a model in which
## alpha and beta vary with Strip.  The starting values are provided
## by the previously fitted model.
## Note that with indexed parameters, the starting values must be
## given in a list (with names):
b <- coef(musc.1)
musc.2 <- nls(Length ~ a[Strip] + b[Strip]*exp(-Conc/th),
              MASS::muscle,
              start = list(a = rep(b[2], 21), b = rep(b[3], 21), th = b[1]))
summary(musc.2)
})
> with(MASS::muscle, table(Strip))  # 2, 3 or 4 obs per strip
Strip
S01 S02 S03 S04 S05 S06 S07 S08 S09 S10 S11 S12 S13 S14 S15 S16 S17 S18 S19 S20 
  4   4   4   3   3   3   2   2   2   2   3   2   2   2   2   4   4   3   3   3 
S21 
  3 
> musc.1 <- nls(Length ~ cbind(1, exp(-Conc/th)), MASS::muscle, start = list(th = 1), 
+     algorithm = "plinear")
> summary(musc.1)

Formula: Length ~ cbind(1, exp(-Conc/th))

Parameters:
      Estimate Std. Error t value Pr(>|t|)    
th       0.608      0.115    5.31  1.9e-06 ***
.lin1   28.963      1.230   23.55  < 2e-16 ***
.lin2  -34.227      3.793   -9.02  1.4e-12 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 4.67 on 57 degrees of freedom

Number of iterations to convergence: 5 
Achieved convergence tolerance: 9.32e-07

> b <- coef(musc.1)
> musc.2 <- nls(Length ~ a[Strip] + b[Strip] * exp(-Conc/th), MASS::muscle, 
+     start = list(a = rep(b[2], 21), b = rep(b[3], 21), th = b[1]))
> summary(musc.2)

Formula: Length ~ a[Strip] + b[Strip] * exp(-Conc/th)

Parameters:
    Estimate Std. Error t value Pr(>|t|)    
a1    23.454      0.796   29.46  5.0e-16 ***
a2    28.302      0.793   35.70  < 2e-16 ***
a3    30.801      1.716   17.95  1.7e-12 ***
a4    25.921      3.016    8.60  1.4e-07 ***
a5    23.201      2.891    8.02  3.5e-07 ***
a6    20.120      2.435    8.26  2.3e-07 ***
a7    33.595      1.682   19.98  3.0e-13 ***
a8    39.053      3.753   10.41  8.6e-09 ***
a9    32.137      3.318    9.69  2.5e-08 ***
a10   40.005      3.336   11.99  1.0e-09 ***
a11   36.190      3.109   11.64  1.6e-09 ***
a12   36.911      1.839   20.07  2.8e-13 ***
a13   30.635      1.700   18.02  1.6e-12 ***
a14   34.312      3.495    9.82  2.0e-08 ***
a15   38.395      3.375   11.38  2.3e-09 ***
a16   31.226      0.886   35.26  < 2e-16 ***
a17   31.230      0.821   38.02  < 2e-16 ***
a18   19.998      1.011   19.78  3.6e-13 ***
a19   37.095      1.071   34.65  < 2e-16 ***
a20   32.594      1.121   29.07  6.2e-16 ***
a21   30.376      1.057   28.74  7.5e-16 ***
b1   -27.300      6.873   -3.97  0.00099 ***
b2   -26.270      6.754   -3.89  0.00118 ** 
b3   -30.901      2.270  -13.61  1.4e-10 ***
b4   -32.238      3.810   -8.46  1.7e-07 ***
b5   -29.941      3.773   -7.94  4.1e-07 ***
b6   -20.622      3.647   -5.65  2.9e-05 ***
b7   -19.625      8.085   -2.43  0.02661 *  
b8   -45.780      4.113  -11.13  3.2e-09 ***
b9   -31.345      6.352   -4.93  0.00013 ***
b10  -38.599      3.955   -9.76  2.2e-08 ***
b11  -33.921      3.839   -8.84  9.2e-08 ***
b12  -38.268      8.992   -4.26  0.00053 ***
b13  -22.568      8.194   -2.75  0.01355 *  
b14  -36.167      6.358   -5.69  2.7e-05 ***
b15  -32.952      6.354   -5.19  7.4e-05 ***
b16  -47.207      9.540   -4.95  0.00012 ***
b17  -33.875      7.688   -4.41  0.00039 ***
b18  -15.896      6.222   -2.55  0.02051 *  
b19  -28.969      7.235   -4.00  0.00092 ***
b20  -36.917      8.033   -4.60  0.00026 ***
b21  -26.508      7.012   -3.78  0.00149 ** 
th     0.797      0.127    6.30  8.0e-06 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 1.11 on 17 degrees of freedom

Number of iterations to convergence: 8 
Achieved convergence tolerance: 2.17e-06
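## (An editor's addition, not in the original example:) the two nested
## fits can be compared directly; anova() for nls objects performs an
## F test on the residual sums of squares:
if(exists("musc.1") && exists("musc.2"))
    print(anova(musc.1, musc.2))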
## IGNORE_RDIFF_END

## Don't show:
options(od)
## End(Don't show)