|
| 1 | +// DO NOT EDIT. |
| 2 | +// This file was generated automatically |
| 3 | +// from gen.go. Please do not edit this file. |
| 4 | + |
// Package md1 implements basic 1D math useful for 3D graphics applications.
| 6 | +// Functions in this package have their OpenGL equivalent which is usually of the same name. |
| 7 | +package md1 |
| 8 | + |
| 9 | +import ( |
| 10 | + math "math" |
| 11 | + "github.com/soypat/geometry/internal" |
| 12 | +) |
| 13 | + |
// Sign returns -1, 0, or 1 for negative, zero or positive x argument, respectively, just like OpenGL's "sign" function.
func Sign(x float64) float64 {
	if x != 0 {
		// Copysign transfers x's sign bit onto 1, covering both the
		// negative and positive cases in a single branch.
		return math.Copysign(1, x)
	}
	return 0
}
| 21 | + |
// Clamp returns value v clamped between Min and Max.
func Clamp(v, Min, Max float64) float64 {
	// First bound v from below, then from above.
	lowerBounded := math.Max(v, Min)
	return math.Min(lowerBounded, Max)
}
| 26 | + |
// Interp performs the linear interpolation between x and y, mapping with a in interval [0,1].
// This function is known as "mix" in OpenGL.
func Interp(x, y, a float64) float64 {
	// Weighted sum of endpoints: a=0 yields x, a=1 yields y.
	weightX := 1 - a
	return x*weightX + y*a
}
| 32 | + |
// SmoothStep performs smooth cubic hermite interpolation between 0 and 1 when edge0 < x < edge1.
func SmoothStep(edge0, edge1, x float64) float64 {
	// Normalize x into the [edge0, edge1] interval and clamp to [0, 1].
	normalized := (x - edge0) / (edge1 - edge0)
	t := math.Min(1, math.Max(normalized, 0))
	// Evaluate the hermite polynomial 3t² - 2t³.
	return t * t * (3 - 2*t)
}
| 38 | + |
// EqualWithinAbs reports whether a and b are within tol of each other.
func EqualWithinAbs(a, b, tol float64) bool {
	diff := a - b
	return math.Abs(diff) <= tol
}
| 43 | + |
| 44 | +// DefaultNewtonRaphsonSolver returns a [NewtonRaphsonSolver] with recommended parameters. |
| 45 | +func DefaultNewtonRaphsonSolver() NewtonRaphsonSolver { |
| 46 | + return NewtonRaphsonSolver{ |
| 47 | + MaxIterations: 17, |
| 48 | + Dx: internal.Smallfloat64, |
| 49 | + Tolerance: 1.49012 * internal.Smallfloat64, |
| 50 | + } |
| 51 | +} |
| 52 | + |
// NewtonRaphsonSolver implements Newton-Raphson root finding algorithm for an arbitrary function.
// MaxIterations, Tolerance and Dx are required parameters; the remaining fields are optional.
type NewtonRaphsonSolver struct {
	// MaxIterations specifies how many iterations of Newton's successive
	// approximations to perform. Each iteration evaluates function 3 times. Parameter is required.
	MaxIterations int
	// Tolerance sets the criteria for ending the root search when f(x)/f'(x) <= Tolerance. Parameter is required.
	Tolerance float64
	// Dx is the step with which the gradient is calculated with central-finite-differences. Parameter is required.
	Dx float64

	// Optional parameters below:

	// Relaxation is optional parameter to avoid overshooting during gradient descent for ill conditioned functions, i.e: large gradient near root.
	// A value in (0,1) scales down each Newton step by (1-Relaxation); zero means full steps.
	Relaxation float64
	// AdaptiveDxMaxIterations sets maximum amount of changes to step (Dx) throughout root search when encountering numerical issues
	// (zero or NaN derivative). If not set then not used.
	AdaptiveDxMaxIterations int
	// RootLims clamps search for root to x_min=RootLims[0], x_max=RootLims[1].
	// The zero value disables clamping.
	RootLims [2]float64
}
| 73 | + |
// Root solves for a root of f such that f(x)=0 starting the guess at x0 and solving using the Newton-Raphson method.
// Root returns the first root found and the amount of iterations before converging.
//
// If the convergence parameter returned is negative a solution was not found within the desired tolerance.
// Root panics on invalid solver parameters (see the switch below for the exact validation rules).
func (nra NewtonRaphsonSolver) Root(x0 float64, f func(xGuess float64) float64) (x_root float64, convergedIn int) {
	// Validate required and optional parameters up front; these are programmer
	// errors in configuring the solver, hence panic rather than error return.
	switch {
	case nra.RootLims[0] > nra.RootLims[1]:
		panic("invalid RootLims")
	case nra.MaxIterations <= 0:
		panic("invalid MaxIterations")
	case nra.Tolerance <= 0 || math.IsNaN(nra.Tolerance):
		panic("invalid Tolerance")
	case nra.Dx <= 0 || math.IsNaN(nra.Dx):
		panic("invalid Step")
	case nra.AdaptiveDxMaxIterations < 0:
		panic("invalid AdaptiveStepMaxIterations")
	case math.IsNaN(nra.Relaxation):
		panic("invalid Relaxation")
	}

	// The zero value of RootLims means "no clamping".
	clampSol := nra.RootLims != [2]float64{}
	// Each Newton step is scaled by krelax; Relaxation=0 gives full steps.
	krelax := 1 - nra.Relaxation
	x_root = x0

	// adapt counts how many times the finite-difference step has been enlarged.
	adapt := 1
	dx := nra.Dx
	dxdiv2 := dx / 2

	for i := 1; i <= nra.MaxIterations; i++ {
		// Approximate derivative f'(x) with central finite difference method.
		// Requires more evaluations but is more precise than regular finite differences.
		fxp := f(x_root + dxdiv2)
		fxn := f(x_root - dxdiv2)
		fprime := (fxp - fxn) / dx

		if fprime == 0 || math.IsNaN(fprime) {
			// Converged to a local minimum which is not a root or problem badly conditioned.
			if adapt > nra.AdaptiveDxMaxIterations {
				// Out of adaptive-step budget; report failure with negative iteration count.
				return x_root, -i
			}
			// Adapt step to be larger (doubling each time) to maybe get out of badly conditioned problem.
			dx = nra.Dx * float64(int(1<<adapt))
			dxdiv2 = dx / 2
			adapt++
			continue
		}

		fx := f(x_root)
		// diff is the full Newton step f(x)/f'(x).
		diff := fx / fprime
		if math.Abs(diff) <= nra.Tolerance {
			// SOLUTION FOUND.
			// apply one more iteration if permitted, we have two evaluations we can make use of.
			xnew := x_root - diff*krelax
			if i < nra.MaxIterations && math.Abs(fx) > math.Abs(f(xnew)) {
				// The extra step improved |f|; keep it and account for the iteration.
				x_root = xnew
				i++
			}
			return x_root, i
		}
		x_root -= diff * krelax
		if clampSol {
			// Keep the iterate inside the user-supplied search interval.
			x_root = Clamp(x_root, nra.RootLims[0], nra.RootLims[1])
		}
	}
	// Iteration budget exhausted without meeting Tolerance; negative signals no convergence.
	return x_root, -nra.MaxIterations
}
0 commit comments