diff --git a/src/TR.jl b/src/TR.jl
index 2a0ac86..b9a924f 100644
--- a/src/TR.jl
+++ b/src/TR.jl
@@ -4,8 +4,8 @@ In practice the most naive way of approaching the update problem
 """
 function TRLooCVUpdateNaive(X, y, lambdasu, bOld)
 
-n, p = size(X);
-rmsecvman = zeros(length(lambdasu));
+n, p = size(X);
+rmsecv = zeros(length(lambdasu));
 
 for i = 1:n
     inds = setdiff(1:n, i);
@@ -24,9 +24,9 @@ for i = 1:n
     end
 end
 
-rmsecvman = sqrt.(1/n .* rmsecvman);
+rmsecv = sqrt.(1/n .* rmsecv);
 
-return rmsecvman
+return rmsecv
 end
 
 """
@@ -35,8 +35,8 @@ Hence regression coefficients are calculated for all lambda values
 """
 function TRLooCVUpdateFair(X, y, lambdasu, bOld)
 
-n, p = size(X);
-rmsecvman = zeros(length(lambdasu))
+n, p = size(X);
+rmsecv = zeros(length(lambdasu))
 
 for i = 1:n
     inds = setdiff(1:n, i);
@@ -53,13 +53,13 @@ for i = 1:n
     denom2 = broadcast(.+, ones(n-1), broadcast(./, lambdasu', s.^2))
 
     # Calculating regression coefficients and residual
-    bcoeffs = V * broadcast(./, (U' * ys), denom) .+ bOld .- V * broadcast(./, V' * bOld, denom2);
-    rmsecvman += ((y[i] .- ((X[i,:]' .- mX) * bcoeffs .+ my)).^2)';
+    bcoeffs = V * broadcast(./, (U' * ys), denom) .+ bOld .- V * broadcast(./, V' * bOld, denom2);
+    rmsecv += ((y[i] .- ((X[i,:]' .- mX) * bcoeffs .+ my)).^2)';
 end
 
-rmsecvman = sqrt.(1/n .* rmsecvman);
+rmsecv = sqrt.(1/n .* rmsecv);
 
-return rmsecvman
+return rmsecv
 end
 
 """
@@ -500,9 +500,9 @@ The LS problem is solved explicitly and no shortcuts are used.
 """
 function TRSegCVUpdateNaive(X, y, lambdas, cvfolds, bOld)
 
-n, p = size(X);
-rmsecvman = zeros(length(lambdas));
-nfolds = length(unique(cvfolds));
+n, p = size(X);
+rmsecv = zeros(length(lambdas));
+nfolds = length(unique(cvfolds));
 
 for j = 1:length(lambdas)
     for i = 1:nfolds
@@ -515,14 +515,14 @@ for j = 1:length(lambdas)
         Xs = Xdata .- mX;
         ys = ydata .- my;
 
-        betas = [Xs; sqrt(lambdas[j]) * I(p)] \ [ys; sqrt(lambdas[j]) * bOld];
-        rmsecvman[j] += sum((y[vec(inds)] - ((X[vec(inds),:] .- mX) * betas .+ my)).^2);
+        betas = [Xs; sqrt(lambdas[j]) * I(p)] \ [ys; sqrt(lambdas[j]) * bOld];
+        rmsecv[j] += sum((y[vec(inds)] - ((X[vec(inds),:] .- mX) * betas .+ my)).^2);
     end
 end
 
-rmsecvman = sqrt.(1/n .* rmsecvman);
+rmsecv = sqrt.(1/n .* rmsecv);
 
-return rmsecvman
+return rmsecv
 end
 
 
@@ -531,9 +531,9 @@ K-fold CV for the Ridge regression update problem, using the 'SVD-trick' for cal
 """
 function TRSegCVUpdateFair(X, y, lambdas, cv, bOld)
 
-n, p = size(X);
-rmsecvman = zeros(length(lambdas));
-nfolds = length(unique(cv));
+n, p = size(X);
+rmsecv = zeros(length(lambdas));
+nfolds = length(unique(cv));
 
 for i = 1:nfolds
     inds = (cv .== i);
@@ -551,14 +551,14 @@ for i = 1:nfolds
     denom2 = broadcast(.+, ones(n-sum(inds)), broadcast(./, lambdas', s.^2));
 
     # Calculating regression coefficients
-    bcoeffs = V * broadcast(./, (U' * ys), denom) .+ bOld .- V * broadcast(./, V' * bOld, denom2);
-    rmsecv += sum((y[vec(inds)] .- ((X[vec(inds),:] .- mX) * bcoeffs .+ my)).^2, dims=1)';
+    bcoeffs = V * broadcast(./, (U' * ys), denom) .+ bOld .- V * broadcast(./, V' * bOld, denom2);
+    rmsecv += sum((y[vec(inds)] .- ((X[vec(inds),:] .- mX) * bcoeffs .+ my)).^2, dims=1)';
 end
 
 
-rmsecvman = sqrt.(1/n .* rmsecvman);
+rmsecv = sqrt.(1/n .* rmsecv);
 
- return rmsecvman
+ return rmsecv
 end
 
 """
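For anyone reviewing the rename, the sketch below exercises the affected functions end to end. It is only an illustration, not part of the patch: the synthetic data, the lambda grid, and the fold labels are invented here, and the functions are assumed to be in scope (for example after include("src/TR.jl") or loading the enclosing module).

# Minimal usage sketch (assumption: TRLooCVUpdateNaive, TRLooCVUpdateFair and
# TRSegCVUpdateFair are in scope; all data below are synthetic and illustrative).
using Random

Random.seed!(42)
n, p = 60, 15
X = randn(n, p)
y = X * randn(p) .+ 0.1 .* randn(n)

bOld    = randn(p)            # previous coefficient vector the update shrinks towards
lambdas = 10.0 .^ (-3:0.5:3)  # grid of regularisation parameters

# Leave-one-out CV: the naive and the SVD-based ("Fair") variants target the
# same RMSECV curve; the latter avoids refitting explicitly for every lambda.
rmsecv_naive = TRLooCVUpdateNaive(X, y, lambdas, bOld)
rmsecv_fair  = TRLooCVUpdateFair(X, y, lambdas, bOld)

# Segmented (K-fold) CV: one integer fold label per sample, values 1:K.
cvfolds = repeat(1:5, inner = cld(n, 5))[1:n]
rmsecv_kfold = TRSegCVUpdateFair(X, y, lambdas, cvfolds, bOld)

# Pick the lambda with the smallest cross-validated error.
best_lambda = lambdas[argmin(vec(rmsecv_fair))]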