// This code has been mechanically translated from the original FORTRAN
// code at http://netlib.org/minpack.

/** Authors:    Lars Tandle Kyllingstad
    Copyright:  Copyright (c) 2009, Lars T. Kyllingstad. All rights reserved.
    License:    Boost License 1.0
*/
module scid.ports.minpack.dogleg;


import std.algorithm: max, min;
import std.math;

import scid.ports.minpack.enorm;



/** Given an m by n matrix A, an n by n nonsingular diagonal matrix D,
    an m-vector b, and a positive number delta, the problem is to
    determine the convex combination x of the Gauss-Newton and scaled
    gradient directions which minimizes (A*x - b) in the least squares
    sense, subject to the restriction that the Euclidean norm of D*x
    be at most delta.

    This function completes the solution of the problem if it is
    provided with the necessary information from the QR factorization
    of A. That is, if A = QR, where Q has orthogonal columns and R is
    an upper triangular matrix, then dogleg expects the full upper
    triangle of R and the first n components of (Q^T)b.

    Params:
        n = a positive integer variable set to the order of R.
        r = an array of length lr which must contain the upper
            triangular matrix R, stored by rows.
        lr = a positive integer variable not less than (n*(n+1))/2,
            the length of the array r.
        diag = a vector of length n which must contain the diagonal
            elements of the matrix D.
        qtb = a vector of length n which must contain the first n
            elements of the vector (Q^T)b.
        delta = a positive variable which specifies an upper bound
            on the Euclidean norm of D*x.
        x = an output vector of length n which contains the desired
            convex combination of the Gauss-Newton direction and
            the scaled gradient direction.
        wa1 = work array of length n.
        wa2 = work array of length n.
*/
void dogleg(Real)(size_t n, Real* r, size_t lr, Real* diag, Real* qtb,
    Real delta, Real* x, Real* wa1, Real* wa2)
{
    size_t i, j, jj, jp1, k, l;
    Real alpha, bnorm, gnorm, qnorm, sgnorm, sum, temp;
    enum : Real
    {
        one = 1.0,
        zero = 0.0,

        // epsmch is the machine precision.
        epsmch = Real.epsilon
    }



    // First, calculate the Gauss-Newton direction, i.e. solve
    // R*x = (Q^T)b by back substitution, walking the row-wise
    // packed upper triangle from the bottom row upwards.
    jj = (n*(n+1))/2;
    for (k=0; k<n; k++)
    {
        j = n - k - 1;
        jp1 = j + 1;
        jj -= k + 1;
        l = jj + 1;

        sum = zero;
        if (n > jp1)
        {
            for (i=jp1; i<n; i++)
            {
                sum += r[l]*x[i];
                l++;
            }
        }

        temp = r[jj];
        if (temp == zero)
        {
            // The diagonal element is zero.  Substitute a small
            // nonzero value based on the largest element in column j
            // of R, so that the division below is well defined.
            l = j;
            for (i=0; i<=j; i++)
            {
                temp = max(temp, abs(r[l]));
                l += n - i - 1;
            }
            temp *= epsmch;
            if (temp == zero) temp = epsmch;
        }

        x[j] = (qtb[j] - sum)/temp;
    }


    // Test whether the Gauss-Newton direction is acceptable.
    for (j=0; j<n; j++)
    {
        wa1[j] = zero;
        wa2[j] = diag[j]*x[j];
    }
    qnorm = enorm(n, wa2);
    if (qnorm <= delta) return;


    // The Gauss-Newton direction is not acceptable.
    // Next, calculate the scaled gradient direction.
    l = 0;
    for (j=0; j<n; j++)
    {
        temp = qtb[j];
        for (i=j; i<n; i++)
        {
            wa1[i] = wa1[i] + r[l]*temp;
            l++;
        }

        wa1[j] = wa1[j]/diag[j];
    }


    // Calculate the norm of the scaled gradient and test for
    // the special case in which the scaled gradient is zero.
    gnorm = enorm(n, wa1);
    sgnorm = zero;
    alpha = delta/qnorm;
    if (gnorm != zero)
    {
        // Calculate the point along the scaled gradient
        // at which the quadratic is minimized.
        for (j=0; j<n; j++)
            wa1[j] = (wa1[j]/gnorm)/diag[j];

        l = 0;
        for (j=0; j<n; j++)
        {
            sum = zero;
            for (i=j; i<n; i++)
            {
                sum += r[l]*wa1[i];
                l++;
            }
            wa2[j] = sum;
        }

        temp = enorm(n, wa2);
        sgnorm = (gnorm/temp)/temp;


        // Test whether the scaled gradient direction is acceptable.
        alpha = zero;
        if (sgnorm < delta)
        {
            // The scaled gradient direction is not acceptable.
            // Finally, calculate the point along the dogleg
            // at which the quadratic is minimized.
            bnorm = enorm(n, qtb);
            temp = (bnorm/gnorm)*(bnorm/qnorm)*(sgnorm/delta);
            temp = temp - (delta/qnorm)*((sgnorm/delta)^^2)
                + sqrt((temp-delta/qnorm)^^2
                + (one-((delta/qnorm)^^2))*(one-((sgnorm/delta)^^2)));
            alpha = ((delta/qnorm)*(one - ((sgnorm/delta)^^2)))/temp;
        }
    }


    // Form appropriate convex combination of the Gauss-Newton
    // direction and the scaled gradient direction.
    temp = (one - alpha)*min(sgnorm, delta);
    for (j=0; j<n; j++)
        x[j] = temp*wa1[j] + alpha*x[j];

    return;
}


unittest { alias dogleg!(real) realDog; }
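

// A small usage sketch, not part of the original MINPACK port.  It sets up
// an illustrative 2-by-2 problem (the numbers below are made up for the
// example): R is the upper triangle stored by rows, r = [R11, R12, R22],
// D is the identity, and delta is chosen small enough that the Gauss-Newton
// step is rejected.  The returned step must then satisfy enorm(D*x) <= delta
// up to rounding.
unittest
{
    enum size_t n = 2;
    enum size_t lr = (n*(n+1))/2;

    double[lr] r   = [2.0, 1.0, 3.0];   // R = [2 1; 0 3], stored by rows
    double[n] diag = [1.0, 1.0];        // D = identity
    double[n] qtb  = [4.0, 9.0];        // first n elements of (Q^T)b
    double delta   = 0.5;               // trust-region radius
    double[n] x, wa1, wa2;

    dogleg!double(n, r.ptr, lr, diag.ptr, qtb.ptr, delta,
        x.ptr, wa1.ptr, wa2.ptr);

    // The dogleg step must respect the trust-region constraint.
    double[n] dx;
    for (size_t i=0; i<n; i++) dx[i] = diag[i]*x[i];
    assert(enorm(n, dx.ptr) <= delta*(1.0 + 1.0e-10));
}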