diff --git a/README.md b/README.md index a16e04b..ff1f2bb 100644 --- a/README.md +++ b/README.md @@ -82,9 +82,9 @@ ou.gradient_descent.[函数名]([目标函数], [参数表], [初始迭代点]) | 方法头 | 解释 | | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ | -| solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 通过解方程的方式来求解精确步长 | -| steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 使用线搜索方法求解非精确步长(默认使用wolfe线搜索) | -| barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="Grippo", c1: Optional[float]=0.6, beta: Optional[float]=0.6, alpha: Optional[float]=1, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 使用Grippo与ZhangHanger提出的非单调线搜索方法更新步长 | +| solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType | 通过解方程的方式来求解精确步长 | +| steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-10, k: int=0) -> OutputType | 使用线搜索方法求解非精确步长(默认使用wolfe线搜索) | +| barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="Grippo", c1: float=0.6, beta: float=0.6, alpha: float=1, epsilon: float=1e-10, k: int=0) -> OutputType | 使用Grippo与ZhangHanger提出的非单调线搜索方法更新步长 | #### 牛顿法(newton) @@ -94,9 +94,9 @@ ou.newton.[函数名]([目标函数], [参数表], [初始迭代点]) | 方法头 | 解释 | | ----------------------------------------------------------------------------------------------- | 
--------------------------------- | -| classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 通过直接对目标函数二阶导矩阵(海瑟矩阵)进行求逆来获取下一步的步长 | -| modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[int]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 修正当前海瑟矩阵保证其正定性(目前只接入了一种修正方法) | -| CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 采用牛顿-共轭梯度法求解梯度(非精确牛顿法的一种) | +| classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType | 通过直接对目标函数二阶导矩阵(海瑟矩阵)进行求逆来获取下一步的步长 | +| modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: int=20, epsilon: float=1e-10, k: int=0) -> OutputType | 修正当前海瑟矩阵保证其正定性(目前只接入了一种修正方法) | +| CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-6, k: int=0) -> OutputType | 采用牛顿-共轭梯度法求解梯度(非精确牛顿法的一种) | #### 拟牛顿法(newton_quasi) @@ -106,9 +106,9 @@ ou.newton_quasi.[函数名]([目标函数], [参数表], [初始迭代点]) | 方法头 | 解释 | | -------------------------------------------------------------------------------------------- | --------------- | -| bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | BFGS方法更新海瑟矩阵 | -| dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: 
Optional[float]=20, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | DFP方法更新海瑟矩阵 | -| L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 双循环方法更新BFGS海瑟矩阵 | +| bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=20, epsilon: float=1e-10, k: int=0) -> OutputType | BFGS方法更新海瑟矩阵 | +| dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=20, epsilon: float=1e-4, k: int=0) -> OutputType | DFP方法更新海瑟矩阵 | +| L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=6, epsilon: float=1e-10, k: int=0) -> OutputType | 双循环方法更新BFGS海瑟矩阵 | #### 非线性最小二乘法(nonlinear_least_square) @@ -118,8 +118,8 @@ ou.nonlinear_least_square.[函数名]([目标函数], [参数表], [初始迭代 | 方法头 | 解释 | | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 高斯-牛顿提出的方法框架,包括OR分解等操作 | -| levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, lamk: Optional[float]=1, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.9, gamma1: Optional[float]=0.7, gamma2: Optional[float]=1.3, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Levenberg Marquardt提出的方法框架 | +| gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, 
output_f: bool=False, method: str="wolfe", epsilon: float=1e-10, k: int=0) -> OutputType | 高斯-牛顿提出的方法框架,包括OR分解等操作 | +| levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, m: float=100, lamk: float=1, eta: float=0.2, p1: float=0.4, p2: float=0.9, gamma1: float=0.7, gamma2: float=1.3, epsilon: float=1e-10, k: int=0) -> OutputType | Levenberg Marquardt提出的方法框架 | #### 信赖域方法(trust_region) @@ -129,7 +129,7 @@ ou.trust_region.[函数名]([目标函数], [参数表], [初始迭代点]) | 方法头 | 解释 | | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | -| steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, r0: Optional[float]=1, rmax: Optional[float]=2, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.6, gamma1: Optional[float]=0.5, gamma2: Optional[float]=1.5, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 截断共轭梯度法在此方法中被用于搜索步长 | +| steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, m: float=100, r0: float=1, rmax: float=2, eta: float=0.2, p1: float=0.4, p2: float=0.6, gamma1: float=0.5, gamma2: float=1.5, epsilon: float=1e-6, k: int=0) -> OutputType | 截断共轭梯度法在此方法中被用于搜索步长 | ### 约束优化算法(constrain) @@ -146,8 +146,8 @@ oc.equal.[函数名]([目标函数], [参数表], [等式约束表], [初始迭 | 方法头 | 解释 | | ----------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | -| penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=2, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 增加二次罚项 | -| 
lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", lamk: Optional[float]=6, sigma: Optional[float]=10, p: Optional[float]=2, etak: Optional[float]=1e-4, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 增广拉格朗日乘子法 | +| penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: float=2, epsilon: float=1e-4, k: int=0) -> OutputType | 增加二次罚项 | +| lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", lamk: float=6, sigma: float=10, p: float=2, etak: float=1e-4, epsilon: float=1e-6, k: int=0) -> OutputType | 增广拉格朗日乘子法 | #### 不等式约束(unequal) @@ -157,9 +157,9 @@ oc.unequal.[函数名]([目标函数], [参数表], [不等式约束表], [初 | 方法头 | 解释 | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- | -| penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=0.4, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 增加二次罚项 | -| penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=12, p: Optional[float]=0.6, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 增加分式函数罚项 | -| lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: 
Optional[str]="gradient_descent", muk: Optional[float]=10, sigma: Optional[float]=8, alpha: Optional[float]=0.2, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-1, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 增广拉格朗日乘子法 | +| penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: float=0.4, epsilon: float=1e-10, k: int=0) -> OutputType | 增加二次罚项 | +| penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=12, p: float=0.6, epsilon: float=1e-6, k: int=0) -> OutputType | 增加分式函数罚项 | +| lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", muk: float=10, sigma: float=8, alpha: float=0.2, beta: float=0.7, p: float=2, eta: float=1e-1, epsilon: float=1e-4, k: int=0) -> OutputType | 增广拉格朗日乘子法 | #### 混合等式约束(mixequal) @@ -169,9 +169,9 @@ oc.mixequal.[函数名]([目标函数], [参数表], [等式约束表], [不等 | 方法头 | 解释 | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | -| penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 增加二次罚项 | -| penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=1, p: 
Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | L1精确罚函数法 | -| lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", lamk: Optional[float]=6, muk: Optional[float]=10, sigma: Optional[float]=8, alpha: Optional[float]=0.5, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-3, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 增广拉格朗日乘子法 | +| penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType | 增加二次罚项 | +| penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=1, p: float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType | L1精确罚函数法 | +| lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", lamk: float=6, muk: float=10, sigma: float=8, alpha: float=0.5, beta: float=0.7, p: float=2, eta: float=1e-3, epsilon: float=1e-4, k: int=0) -> OutputType | 增广拉格朗日乘子法 | ### 方法的应用(example) @@ -187,10 +187,10 @@ oe.Lasso.[函数名]([矩阵A], [矩阵b], [因子mu], [参数表], [初始迭 | 方法头 | 解释 | | ------------------------------------------------------------------------------------------------------- | ---------------- | -| gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, delta: Optional[float]=10, alp: Optional[float]=1e-3, epsilon: Optional[float]=1e-2, k: Optional[int]=0) -> OutputType | 光滑化Lasso函数法 
| -| subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, alphak: Optional[float]=2e-2, epsilon: Optional[float]=1e-3, k: Optional[int]=0) -> OutputType | 次梯度法Lasso避免一阶不可导 | -| penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, gamma: Optional[float]=0.01, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 罚函数法 | -| approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 邻近算子更新 | +| gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, delta: float=10, alp: float=1e-3, epsilon: float=1e-2, k: int=0) -> OutputType | 光滑化Lasso函数法 | +| subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, alphak: float=2e-2, epsilon: float=1e-3, k: int=0) -> OutputType | 次梯度法Lasso避免一阶不可导 | +| penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, gamma: float=0.01, epsilon: float=1e-6, k: int=0) -> OutputType | 罚函数法 | +| approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-4, k: int=0) -> OutputType | 邻近算子更新 | #### 曲线相切问题(WanYuan) @@ -206,7 +206,7 @@ oe.WanYuan.[函数名]([直线的斜率], [直线的截距], [二次项系数], | 方法头 | 解释 | | --------------------------------------------------------------- | -------------------- | -| solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: Optional[bool]=False, eps: Optional[float]=1e-10) -> None | 使用高斯-牛顿方法求解构造的7个残差函数 | +| solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: 
bool=False, eps: float=1e-10) -> None | 使用高斯-牛顿方法求解构造的7个残差函数 | ### 混合优化算法(hybrid) diff --git a/README_en.md b/README_en.md index f6ed6ff..32973f7 100644 --- a/README_en.md +++ b/README_en.md @@ -82,9 +82,9 @@ ou.gradient_descent.[Function Name]([Target Function], [Parameters], [Initial Po | head meathod | explain | | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ | -| solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Solve the exact step by solving the equation | -| steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Use line search method to solve imprecise step size (wolfe line search is used by default) | -| barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="Grippo", c1: Optional[float]=0.6, beta: Optional[float]=0.6, alpha: Optional[float]=1, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Update the step size using the nonmonotonic line search method proposed by Grippo and Zhang Hanger | +| solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType | Solve the exact step by solving the equation | +| steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-10, k: int=0) -> OutputType | Use line search method to solve imprecise step size (wolfe line search is used by default) | +| barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, 
output_f: bool=False, method: str="Grippo", c1: float=0.6, beta: float=0.6, alpha: float=1, epsilon: float=1e-10, k: int=0) -> OutputType | Update the step size using the nonmonotonic line search method proposed by Grippo and Zhang Hanger | #### Newton Methods(newton) @@ -94,9 +94,9 @@ ou.newton.[Function Name]([Target Function], [Parameters], [Initial Point]) | head meathod | explain | | ----------------------------------------------------------------------------------------------- | --------------------------------- | -| classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | The next step is obtained by directly inverting the second derivative matrix of Target Function (Heather matrix) | -| modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[int]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Revise the current Heather matrix to ensure its positive definiteness (only one correction method is connected at present) | -| CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | Newton conjugate gradient method is used to solve the gradient (a kind of inexact Newton method) | +| classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType | The next step is obtained by directly inverting the second derivative matrix of Target Function (Heather matrix) | +| modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: int=20, epsilon: float=1e-10, k: int=0) -> OutputType | Revise the current Heather matrix to ensure 
its positive definiteness (only one correction method is connected at present) | +| CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-6, k: int=0) -> OutputType | Newton conjugate gradient method is used to solve the gradient (a kind of inexact Newton method) | #### Quasi Newton Methods(newton_quasi) @@ -106,9 +106,9 @@ ou.newton_quasi.[Function Name]([Target Function], [Parameters], [Initial Point] | head meathod | explain | | -------------------------------------------------------------------------------------------- | --------------- | -| bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Updating Heiser Matrix by BFGS Method | -| dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=20, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | Updating Heiser Matrix by DFP Method | -| L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Updating the Heiser Matrix of BFGS by Double Loop Method | +| bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=20, epsilon: float=1e-10, k: int=0) -> OutputType | Updating Heiser Matrix by BFGS Method | +| dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=20, epsilon: float=1e-4, k: int=0) -> OutputType | Updating Heiser Matrix by DFP Method | +| L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, 
output_f: bool=False, method: str="wolfe", m: float=6, epsilon: float=1e-10, k: int=0) -> OutputType | Updating the Heiser Matrix of BFGS by Double Loop Method | #### Nonlinear Least Square Methods(nonlinear_least_square) @@ -118,8 +118,8 @@ ou.nonlinear_least_square.[Function Name]([Target Function], [Parameters], [Init | head meathod | explain | | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Gauss Newton's method framework, including OR decomposition and other operations | -| levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, lamk: Optional[float]=1, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.9, gamma1: Optional[float]=0.7, gamma2: Optional[float]=1.3, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Methodology framework proposed by Levenberg Marquardt | +| gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-10, k: int=0) -> OutputType | Gauss Newton's method framework, including OR decomposition and other operations | +| levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, m: float=100, lamk: float=1, eta: float=0.2, p1: float=0.4, p2: float=0.9, gamma1: float=0.7, gamma2: float=1.3, epsilon: float=1e-10, k: int=0) -> OutputType | Methodology framework proposed by Levenberg Marquardt | #### Trust Region Methods(trust_region) @@ -129,7 +129,7 @@ ou.trust_region.[Function Name]([Target Function], [Parameters], 
[Initial Point] | head meathod | explain | | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | -| steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, r0: Optional[float]=1, rmax: Optional[float]=2, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.6, gamma1: Optional[float]=0.5, gamma2: Optional[float]=1.5, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | Truncated conjugate gradient method is used to search step size in this method | +| steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, m: float=100, r0: float=1, rmax: float=2, eta: float=0.2, p1: float=0.4, p2: float=0.6, gamma1: float=0.5, gamma2: float=1.5, epsilon: float=1e-6, k: int=0) -> OutputType | Truncated conjugate gradient method is used to search step size in this method | ### Constrained Optimization Algorithms(constrain) @@ -146,8 +146,8 @@ oc.equal.[Function Name]([Target Function], [Parameters], [Equal Constraint Tabl | head meathod | explain | | ----------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | -| penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=2, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | Add secondary penalty | -| lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", lamk: Optional[float]=6, sigma: Optional[float]=10, p: Optional[float]=2, 
etak: Optional[float]=1e-4, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | Augmented lagrange multiplier method | +| penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: float=2, epsilon: float=1e-4, k: int=0) -> OutputType | Add secondary penalty | +| lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", lamk: float=6, sigma: float=10, p: float=2, etak: float=1e-4, epsilon: float=1e-6, k: int=0) -> OutputType | Augmented lagrange multiplier method | #### Unequal Constraint(unequal) @@ -157,9 +157,9 @@ oc.unequal.[Function Name]([Target Function], [Parameters], [Unequal Constraint | head meathod | explain | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- | -| penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=0.4, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Add secondary penalty | -| penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=12, p: Optional[float]=0.6, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | Increase penalty term of fractional function | -| lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", muk: Optional[float]=10, sigma: Optional[float]=8, alpha: 
Optional[float]=0.2, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-1, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | Augmented lagrange multiplier method | +| penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: float=0.4, epsilon: float=1e-10, k: int=0) -> OutputType | Add secondary penalty | +| penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=12, p: float=0.6, epsilon: float=1e-6, k: int=0) -> OutputType | Increase penalty term of fractional function | +| lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", muk: float=10, sigma: float=8, alpha: float=0.2, beta: float=0.7, p: float=2, eta: float=1e-1, epsilon: float=1e-4, k: int=0) -> OutputType | Augmented lagrange multiplier method | #### Mixequal Constraint(mixequal) @@ -169,9 +169,9 @@ oc.mixequal.[Function Name]([Target Function], [Parameters], [Equal Constraint T | head meathod | explain | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | -| penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Add secondary penalty | -| penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: 
Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=1, p: Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | L1 exact penalty function method | -| lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", lamk: Optional[float]=6, muk: Optional[float]=10, sigma: Optional[float]=8, alpha: Optional[float]=0.5, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-3, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | Augmented lagrange multiplier method | +| penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType | Add secondary penalty | +| penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=1, p: float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType | L1 exact penalty function method | +| lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", lamk: float=6, muk: float=10, sigma: float=8, alpha: float=0.5, beta: float=0.7, p: float=2, eta: float=1e-3, epsilon: float=1e-4, k: int=0) -> OutputType | Augmented lagrange multiplier method | ### Application of Methods(example) @@ -187,10 +187,10 @@ oe.Lasso.[Function Name]([Matrxi A], [Matrix b], [Factor mu], [Parameters], [Ini | head meathod | explain | | ------------------------------------------------------------------------------------------------------- | ---------------- | 
-| gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, delta: Optional[float]=10, alp: Optional[float]=1e-3, epsilon: Optional[float]=1e-2, k: Optional[int]=0) -> OutputType | Smoothing Lasso Function Method | -| subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, alphak: Optional[float]=2e-2, epsilon: Optional[float]=1e-3, k: Optional[int]=0) -> OutputType | Sub gradient method Lasso: avoiding first order nondifferentiability | -| penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, gamma: Optional[float]=0.01, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | Penalty function method | -| approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | Adjacent operator updating | +| gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, delta: float=10, alp: float=1e-3, epsilon: float=1e-2, k: int=0) -> OutputType | Smoothing Lasso Function Method | +| subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, alphak: float=2e-2, epsilon: float=1e-3, k: int=0) -> OutputType | Sub gradient method Lasso: avoiding first order nondifferentiability | +| penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, gamma: float=0.01, epsilon: float=1e-6, k: int=0) -> OutputType | Penalty function method | +| approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-4, k: int=0) -> OutputType | Adjacent 
operator updating | #### Curve Tangency Problem(WanYuan) @@ -206,7 +206,7 @@ Given the slope and intercept of a straight line, given the coefficient of the q | head meathod | explain | | --------------------------------------------------------------- | -------------------- | -| solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: Optional[bool]=False, eps: Optional[float]=1e-10) -> None | Using Gauss Newton Method to solve the 7 Residual Functions Constructed | +| solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: bool=False, eps: float=1e-10) -> None | Using Gauss Newton Method to solve the 7 Residual Functions Constructed | ### Hybrid Optimization Algorithms(hybrid) diff --git a/examples/doc/_constrain.md b/examples/doc/_constrain.md index 6d3ccd4..54f67b1 100644 --- a/examples/doc/_constrain.md +++ b/examples/doc/_constrain.md @@ -29,8 +29,8 @@ oc.equal.[函数名]([目标函数], [参数表], [等式约束表], [初始迭 | 方法头 | 解释 | | ----------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | -| penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=2, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 增加二次罚项 | -| lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", lamk: Optional[float]=6, sigma: Optional[float]=10, p: Optional[float]=2, etak: Optional[float]=1e-4, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 增广拉格朗日乘子法 | +| penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, 
method: str="gradient_descent", sigma: float=10, p: float=2, epsilon: float=1e-4, k: int=0) -> OutputType | 增加二次罚项 | +| lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", lamk: float=6, sigma: float=10, p: float=2, etak: float=1e-4, epsilon: float=1e-6, k: int=0) -> OutputType | 增广拉格朗日乘子法 | ```python @@ -60,9 +60,9 @@ oc.unequal.[函数名]([目标函数], [参数表], [不等式约束表], [初 | 方法头 | 解释 | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- | -| penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=0.4, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 增加二次罚项 | -| penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=12, p: Optional[float]=0.6, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 增加分式函数罚项 | -| lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", muk: Optional[float]=10, sigma: Optional[float]=8, alpha: Optional[float]=0.2, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-1, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 增广拉格朗日乘子法 | +| penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: float=0.4, epsilon: float=1e-10, k: int=0) -> OutputType | 增加二次罚项 | +| 
penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=12, p: float=0.6, epsilon: float=1e-6, k: int=0) -> OutputType | 增加分式函数罚项 | +| lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", muk: float=10, sigma: float=8, alpha: float=0.2, beta: float=0.7, p: float=2, eta: float=1e-1, epsilon: float=1e-4, k: int=0) -> OutputType | 增广拉格朗日乘子法 | ```python @@ -92,9 +92,9 @@ oc.mixequal.[函数名]([目标函数], [参数表], [等式约束表], [不等 | 方法头 | 解释 | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | -| penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 增加二次罚项 | -| penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=1, p: Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | L1精确罚函数法 | -| lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", lamk: Optional[float]=6, muk: Optional[float]=10, sigma: Optional[float]=8, alpha: Optional[float]=0.5, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-3, epsilon: Optional[float]=1e-4, k: 
Optional[int]=0) -> OutputType | 增广拉格朗日乘子法 | +| penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType | 增加二次罚项 | +| penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=1, p: float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType | L1精确罚函数法 | +| lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", lamk: float=6, muk: float=10, sigma: float=8, alpha: float=0.5, beta: float=0.7, p: float=2, eta: float=1e-3, epsilon: float=1e-4, k: int=0) -> OutputType | 增广拉格朗日乘子法 | ```python @@ -114,9 +114,3 @@ oc.mixequal.penalty_L1(f, (x1, x2), c1, c2, (1.5, 0.5)) (array([2., 1.]), 47) - - - -```python - -``` diff --git a/examples/doc/_example.md b/examples/doc/_example.md index e429c6d..f060617 100644 --- a/examples/doc/_example.md +++ b/examples/doc/_example.md @@ -20,10 +20,10 @@ oe.Lasso.[函数名]([矩阵A], [矩阵b], [因子mu], [参数表], [初始迭 | 方法头 | 解释 | | ------------------------------------------------------------------------------------------------------- | ---------------- | -| gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, delta: Optional[float]=10, alp: Optional[float]=1e-3, epsilon: Optional[float]=1e-2, k: Optional[int]=0) -> OutputType | 光滑化Lasso函数法 | -| subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, alphak: Optional[float]=2e-2, epsilon: Optional[float]=1e-3, k: Optional[int]=0) -> OutputType | 次梯度法Lasso避免一阶不可导 | -| 
penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, gamma: Optional[float]=0.01, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 罚函数法 | -| approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 邻近算子更新 | +| gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, delta: float=10, alp: float=1e-3, epsilon: float=1e-2, k: int=0) -> OutputType | 光滑化Lasso函数法 | +| subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, alphak: float=2e-2, epsilon: float=1e-3, k: int=0) -> OutputType | 次梯度法Lasso避免一阶不可导 | +| penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, gamma: float=0.01, epsilon: float=1e-6, k: int=0) -> OutputType | 罚函数法 | +| approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-4, k: int=0) -> OutputType | 邻近算子更新 | ```python @@ -69,7 +69,7 @@ oe.WanYuan.[函数名]([直线的斜率], [直线的截距], [二次项系数], | 方法头 | 解释 | | --------------------------------------------------------------- | -------------------- | -| solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: Optional[bool]=False, eps: Optional[float]=1e-10) -> None | 使用高斯-牛顿方法求解构造的7个残差函数 | +| solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: bool=False, eps: float=1e-10) -> None | 使用高斯-牛顿方法求解构造的7个残差函数 | ```python diff --git a/examples/doc/_unconstrain.md b/examples/doc/_unconstrain.md index 32b631a..fde0d85 100644 --- a/examples/doc/_unconstrain.md +++ b/examples/doc/_unconstrain.md @@ -33,9 +33,9 @@ 
ou.gradient_descent.[函数名]([目标函数], [参数表], [初始迭代点]) | 方法头 | 解释 | | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ | -| solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 通过解方程的方式来求解精确步长 | -| steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 使用线搜索方法求解非精确步长(默认使用wolfe线搜索) | -| barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="Grippo", c1: Optional[float]=0.6, beta: Optional[float]=0.6, alpha: Optional[float]=1, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 使用Grippo与ZhangHanger提出的非单调线搜索方法更新步长 | +| solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType | 通过解方程的方式来求解精确步长 | +| steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-10, k: int=0) -> OutputType | 使用线搜索方法求解非精确步长(默认使用wolfe线搜索) | +| barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="Grippo", c1: float=0.6, beta: float=0.6, alpha: float=1, epsilon: float=1e-10, k: int=0) -> OutputType | 使用Grippo与ZhangHanger提出的非单调线搜索方法更新步长 | ```python @@ -65,9 +65,9 @@ ou.newton.[函数名]([目标函数], [参数表], [初始迭代点]) | 方法头 | 解释 | | ----------------------------------------------------------------------------------------------- | --------------------------------- | -| classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: 
Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 通过直接对目标函数二阶导矩阵(海瑟矩阵)进行求逆来获取下一步的步长 | -| modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[int]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 修正当前海瑟矩阵保证其正定性(目前只接入了一种修正方法) | -| CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 采用牛顿-共轭梯度法求解梯度(非精确牛顿法的一种) | +| classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType | 通过直接对目标函数二阶导矩阵(海瑟矩阵)进行求逆来获取下一步的步长 | +| modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: int=20, epsilon: float=1e-10, k: int=0) -> OutputType | 修正当前海瑟矩阵保证其正定性(目前只接入了一种修正方法) | +| CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-6, k: int=0) -> OutputType | 采用牛顿-共轭梯度法求解梯度(非精确牛顿法的一种) | ```python @@ -97,9 +97,9 @@ ou.newton_quasi.[函数名]([目标函数], [参数表], [初始迭代点]) | 方法头 | 解释 | | -------------------------------------------------------------------------------------------- | --------------- | -| bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | BFGS方法更新海瑟矩阵 | -| dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=20, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | DFP方法更新海瑟矩阵 | -| L_BFGS(funcs: FuncArray, args: ArgArray, x_0: 
PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 双循环方法更新BFGS海瑟矩阵 | +| bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=20, epsilon: float=1e-10, k: int=0) -> OutputType | BFGS方法更新海瑟矩阵 | +| dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=20, epsilon: float=1e-4, k: int=0) -> OutputType | DFP方法更新海瑟矩阵 | +| L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=6, epsilon: float=1e-10, k: int=0) -> OutputType | 双循环方法更新BFGS海瑟矩阵 | @@ -130,8 +130,8 @@ ou.nonlinear_least_square.[函数名]([目标函数], [参数表], [初始迭代 | 方法头 | 解释 | | ---------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 高斯-牛顿提出的方法框架,包括OR分解等操作 | -| levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, lamk: Optional[float]=1, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.9, gamma1: Optional[float]=0.7, gamma2: Optional[float]=1.3, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Levenberg Marquardt提出的方法框架 | +| gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-10, k: int=0) -> OutputType | 高斯-牛顿提出的方法框架,包括QR分解等操作 | +| levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0:
PointArray, draw: bool=True, output_f: bool=False, m: float=100, lamk: float=1, eta: float=0.2, p1: float=0.4, p2: float=0.9, gamma1: float=0.7, gamma2: float=1.3, epsilon: float=1e-10, k: int=0) -> OutputType | Levenberg Marquardt提出的方法框架 | ```python @@ -164,7 +164,7 @@ ou.trust_region.[函数名]([目标函数], [参数表], [初始迭代点]) | 方法头 | 解释 | | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | -| steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, r0: Optional[float]=1, rmax: Optional[float]=2, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.6, gamma1: Optional[float]=0.5, gamma2: Optional[float]=1.5, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 截断共轭梯度法在此方法中被用于搜索步长 | +| steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, m: float=100, r0: float=1, rmax: float=2, eta: float=0.2, p1: float=0.4, p2: float=0.6, gamma1: float=0.5, gamma2: float=1.5, epsilon: float=1e-6, k: int=0) -> OutputType | 截断共轭梯度法在此方法中被用于搜索步长 | ```python diff --git a/optimtool/__init__.py b/optimtool/__init__.py index 660245a..bfa023c 100644 --- a/optimtool/__init__.py +++ b/optimtool/__init__.py @@ -28,4 +28,4 @@ from ._version import __version__ if sys.version_info < (3, 7, 0): - raise OSError(f'optimtool-2.4.0 requires Python >=3.7, but yours is {sys.version}') \ No newline at end of file + raise OSError(f'optimtool-2.4.1 requires Python >=3.7, but yours is {sys.version}') \ No newline at end of file diff --git a/optimtool/_convert.py b/optimtool/_convert.py index 47ba608..62cd395 100644 --- a/optimtool/_convert.py +++ b/optimtool/_convert.py @@ -20,7 +20,7 @@ import sympy as sp import numpy as np -from ._typing import FuncArray, ArgArray, PointArray, SympyMutableDenseMatrix, NDArray, Optional +from 
._typing import FuncArray, ArgArray, PointArray, SympyMutableDenseMatrix, NDArray def f2m(funcs: FuncArray) -> SympyMutableDenseMatrix: ''' @@ -80,7 +80,7 @@ def p2t(x_0: PointArray) -> PointArray: x_0 = (x_0) return x_0 -def h2h(hessian: NDArray, m: float, pk: Optional[int]=1) -> NDArray: +def h2h(hessian: NDArray, m: float, pk: int=1) -> NDArray: ''' Parameters ---------- diff --git a/optimtool/_drive.py b/optimtool/_drive.py index cc2fb6e..b957843 100644 --- a/optimtool/_drive.py +++ b/optimtool/_drive.py @@ -20,7 +20,7 @@ import numpy as np import sympy as sp -from ._typing import SympyMutableDenseMatrix, List, IterPointType, NDArray, Optional, DataType, Tuple +from ._typing import SympyMutableDenseMatrix, List, IterPointType, NDArray, DataType, Tuple def Q_k(eta: float, k: int) -> float: ''' @@ -142,7 +142,7 @@ def get_subgradient(resv: NDArray, argsv: NDArray, mu: float) -> DataType: f.append(i - mu * 1) return f[0] -def CG_gradient(A: NDArray, b: NDArray, dk: NDArray, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> Tuple[NDArray,int]: +def CG_gradient(A: NDArray, b: NDArray, dk: NDArray, epsilon: float=1e-6, k: int=0) -> Tuple[NDArray,int]: ''' Parameters ---------- @@ -155,10 +155,10 @@ def CG_gradient(A: NDArray, b: NDArray, dk: NDArray, epsilon: Optional[float]=1e dk : NDArray 初始梯度下降方向(列向量) - epsilon : Optional[float] + epsilon : float 精度 - k : Optional[int] + k : int 迭代次数 @@ -258,7 +258,7 @@ def Eq_Sovle(sk: NDArray, pk: NDArray, delta: float): mt = sp.solve(h) return mt[0] -def steihaug(sk: List[int], rk: NDArray, pk: NDArray, B: NDArray, delta: float, epsilon: Optional[float]=1e-3, k: Optional[int]=0) -> Tuple[NDArray,int]: +def steihaug(sk: List[int], rk: NDArray, pk: NDArray, B: NDArray, delta: float, epsilon: float=1e-3, k: int=0) -> Tuple[NDArray,int]: ''' Parameters ---------- @@ -277,10 +277,10 @@ def steihaug(sk: List[int], rk: NDArray, pk: NDArray, B: NDArray, delta: float, delta : float 搜索半径 - epsilon : Optional[float] + epsilon : float 
精度 - k : Optional[int] + k : int 迭代次数 diff --git a/optimtool/_search.py b/optimtool/_search.py index 23ce3a8..8ab0dba 100644 --- a/optimtool/_search.py +++ b/optimtool/_search.py @@ -19,10 +19,10 @@ # SOFTWARE. import numpy as np -from ._typing import Optional, List, NDArray, SympyMutableDenseMatrix, DataType, IterPointType +from ._typing import List, NDArray, SympyMutableDenseMatrix, DataType, IterPointType # Armijo线搜索准则 -def armijo(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, gamma: Optional[float]=0.5, c: Optional[float]=0.1) -> float: +def armijo(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, gamma: float=0.5, c: float=0.1) -> float: ''' Parameters ---------- @@ -33,15 +33,15 @@ def armijo(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: I 参数列表 x_0 : IterPointType - 初始迭代点列表 + 初始迭代点 d : NDArray 当前下降方向 - gamma : Optional[float] + gamma : float 修正参数 - c : Optional[float] + c : float 常数 @@ -70,7 +70,7 @@ def armijo(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: I return alpha # Goldstein线搜索准则 -def goldstein(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, c: Optional[float]=0.1, alphas: Optional[float]=0, alphae: Optional[float]=10, t: Optional[float]=1.2, eps: Optional[float]=1e-3) -> float: +def goldstein(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, c: float=0.1, alphas: float=0, alphae: float=10, t: float=1.2, eps: float=1e-3) -> float: ''' Parameters ---------- @@ -81,21 +81,21 @@ def goldstein(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0 参数列表 x_0 : IterPointType - 初始迭代点列表 + 初始迭代点 d : NDArray 当前下降方向 - alphas : Optional[float] + alphas : float 起始搜索区间 - alphae : Optional[float] + alphae : float 终止搜索区间 - t : Optional[float] + t : float 扩大倍数参数 - eps : Optional[float] + eps : float 终止参数 @@ -134,7 +134,7 @@ def 
goldstein(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0 return alpha # Wolfe线搜索准则 -def wolfe(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, c1: Optional[float]=0.3, c2: Optional[float]=0.5, alphas: Optional[float]=0, alphae: Optional[float]=2, eps: Optional[float]=1e-3) -> float: +def wolfe(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, c1: float=0.3, c2: float=0.5, alphas: float=0, alphae: float=2, eps: float=1e-3) -> float: ''' Parameters ---------- @@ -145,24 +145,24 @@ def wolfe(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: It 参数列表 x_0 : IterPointType - 初始迭代点列表 + 初始迭代点 d : NDArray 当前下降方向 - c1 : Optional[float] + c1 : float 常数 - c2 : Optional[float] + c2 : float 常数 - alphas : Optional[float] + alphas : float 起始搜索区间 - alphae : Optional[float] + alphae : float 终止搜索区间 - eps : Optional[float] + eps : float 终止参数 @@ -201,7 +201,7 @@ def wolfe(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: It return alpha # 非单调线搜索准则之Grippo(一般与Barzilar Borwein梯度下降法配合使用) -def Grippo(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, k: int, point: List[IterPointType], c1: float, beta: float, alpha: float, M: Optional[int]=20) -> float: +def Grippo(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, k: int, point: List[IterPointType], c1: float, beta: float, alpha: float, M: int=20) -> float: ''' Parameters ---------- @@ -212,7 +212,7 @@ def Grippo(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: I 参数列表 x_0 : IterPointType - 初始迭代点列表 + 初始迭代点 d : NDArray 当前下降方向 @@ -264,7 +264,7 @@ def Grippo(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: I return alpha # 非单调线搜索准则之ZhangHanger(一般与程序配套使用) -def ZhangHanger(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, k: int, 
point: List[IterPointType], c1: float, beta: float, alpha: float, eta: Optional[float]=0.6) -> float: +def ZhangHanger(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, d: NDArray, k: int, point: List[IterPointType], c1: float, beta: float, alpha: float, eta: float=0.6) -> float: ''' Parameters ---------- @@ -275,7 +275,7 @@ def ZhangHanger(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x 参数列表 x_0 : IterPointType - 初始迭代点列表 + 初始迭代点 d : NDArray 当前下降方向 diff --git a/optimtool/_utils.py b/optimtool/_utils.py index 05b67fd..55aa0fe 100644 --- a/optimtool/_utils.py +++ b/optimtool/_utils.py @@ -18,7 +18,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -from ._typing import DataType, Optional, SympyMutableDenseMatrix, List, IterPointType, Union, PointType +from ._typing import DataType, Optional, SympyMutableDenseMatrix, List, IterPointType def get_value(funcs: SympyMutableDenseMatrix, args: SympyMutableDenseMatrix, x_0: IterPointType, mu: Optional[float]=None) -> DataType: ''' diff --git a/optimtool/_version.py b/optimtool/_version.py index 1530b97..37eed71 100644 --- a/optimtool/_version.py +++ b/optimtool/_version.py @@ -18,4 +18,4 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
-__version__ = '2.4.0' \ No newline at end of file +__version__ = '2.4.1' \ No newline at end of file diff --git a/optimtool/constrain/equal.py b/optimtool/constrain/equal.py index 0c7b81a..63dd240 100644 --- a/optimtool/constrain/equal.py +++ b/optimtool/constrain/equal.py @@ -23,10 +23,10 @@ from .._utils import get_value, plot_iteration from .._convert import f2m, a2m, p2t -from .._typing import FuncArray, ArgArray, PointArray, Optional, OutputType, DataType +from .._typing import FuncArray, ArgArray, PointArray, OutputType, DataType # 二次罚函数法(等式约束) -def penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=2, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType: +def penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10.0, p: float=2.0, epsilon: float=1e-4, k: int=0) -> OutputType: ''' Parameters ---------- @@ -40,24 +40,24 @@ def penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: 等式参数约束列表 x_0 : PointArray - 初始迭代点列表 + 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 无约束优化方法内核 - sigma : Optional[float] + sigma : float 罚函数因子 - p : Optional[float] + p : float 修正参数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 k : int @@ -94,7 +94,7 @@ def penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: return (x_0, k, f) if output_f is True else (x_0, k) # 增广拉格朗日函数乘子法(等式约束) -def lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", lamk: Optional[float]=6, sigma: Optional[float]=10, p: 
Optional[float]=2, etak: Optional[float]=1e-4, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType: +def lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", lamk: float=6, sigma: float=10, p: float=2, etak: float=1e-4, epsilon: float=1e-6, k: int=0) -> OutputType: ''' Parameters ---------- @@ -108,33 +108,33 @@ def lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: 等式参数约束列表 x_0 : PointArray - 初始迭代点列表 + 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 无约束优化方法内核 - lamk : Optional[float] + lamk : float 因子 - sigma : Optional[float] + sigma : float 罚函数因子 - p : Optional[float] + p : float 修正参数 - etak : Optional[float] + etak : float 常数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 diff --git a/optimtool/constrain/mixequal.py b/optimtool/constrain/mixequal.py index 178a721..0b47113 100644 --- a/optimtool/constrain/mixequal.py +++ b/optimtool/constrain/mixequal.py @@ -23,10 +23,10 @@ from .._utils import get_value, plot_iteration from .._convert import f2m, a2m, p2t -from .._typing import FuncArray, ArgArray, PointArray, Optional, OutputType, DataType +from .._typing import FuncArray, ArgArray, PointArray, OutputType, DataType # 二次罚函数法(混合约束) -def penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: 
float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -43,27 +43,27 @@ def penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, 不等式参数约束列表 x_0 : PointArray - 初始迭代点列表 + 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 无约束优化方法内核 - sigma : Optional[float] + sigma : float 罚函数因子 - p : Optional[float] + p : float 修正参数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -99,7 +99,7 @@ def penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, return (x_0, k, f) if output_f is True else (x_0, k) # 精确罚函数法-l1罚函数法 (混合约束) -def penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=1, p: Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=1, p: float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -116,27 +116,27 @@ def penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_une 不等式参数约束列表 x_0 : PointArray - 初始迭代点列表 + 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 无约束优化方法内核 - sigma : Optional[float] + sigma : float 罚函数因子 - p : Optional[float] + p : float 修正参数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -175,7 +175,7 @@ def penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_une return (x_0, k, f) if output_f is True else (x_0, k) # 增广拉格朗日函数法(混合约束) 
-def lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", lamk: Optional[float]=6, muk: Optional[float]=10, sigma: Optional[float]=8, alpha: Optional[float]=0.5, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-3, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType: +def lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", lamk: float=6, muk: float=10, sigma: float=8, alpha: float=0.5, beta: float=0.7, p: float=2, eta: float=1e-3, epsilon: float=1e-4, k: int=0) -> OutputType: ''' Parameters ---------- @@ -192,42 +192,42 @@ def lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, 不等式参数约束列表 x_0 : PointArray - 初始迭代点列表 + 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 无约束优化方法内核 - lamk : Optional[float] + lamk : float 因子 - muk : Optional[float] + muk : float 因子 - sigma : Optional[float] + sigma : float 罚函数因子 - alpha : Optional[float] + alpha : float 初始步长 - beta : Optional[float] + beta : float 修正参数 - p : Optional[float] + p : float 修正参数 - eta : Optional[float] + eta : float 常数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 diff --git a/optimtool/constrain/unequal.py b/optimtool/constrain/unequal.py index ac4c138..1b1c632 100644 --- a/optimtool/constrain/unequal.py +++ b/optimtool/constrain/unequal.py @@ -23,10 +23,10 @@ from .._utils import get_value, plot_iteration from .._convert import f2m, a2m, p2t -from .._typing import FuncArray, ArgArray, PointArray, Optional, OutputType, DataType +from .._typing import FuncArray, ArgArray, PointArray, OutputType, DataType # 
二次罚函数法(不等式约束) -def penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=10, p: Optional[float]=0.4, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=10, p: float=0.4, epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -42,25 +42,25 @@ def penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: P x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 无约束优化方法内核 - sigma : Optional[float] + sigma : float 罚函数因子 - p : Optional[float] + p : float 修正参数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -102,7 +102,7 @@ def penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: P ''' # 分式 -def penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", sigma: Optional[float]=12, p: Optional[float]=0.6, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType: +def penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", sigma: float=12, p: float=0.6, epsilon: float=1e-6, k: int=0) -> OutputType: ''' Parameters ---------- @@ -118,25 +118,25 @@ def penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + 
method : str 无约束优化方法内核 - sigma : Optional[float] + sigma : float 罚函数因子 - p : Optional[float] + p : float 修正参数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -173,7 +173,7 @@ def penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, return (x_0, k, f) if output_f is True else (x_0, k) # 增广拉格朗日函数法(不等式约束) -def lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="gradient_descent", muk: Optional[float]=10, sigma: Optional[float]=8, alpha: Optional[float]=0.2, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-1, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType: +def lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="gradient_descent", muk: float=10, sigma: float=8, alpha: float=0.2, beta: float=0.7, p: float=2, eta: float=1e-1, epsilon: float=1e-4, k: int=0) -> OutputType: ''' Parameters ---------- @@ -187,39 +187,39 @@ def lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: 不等式参数约束列表 x_0 : PointArray - 初始迭代点列表 + 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 无约束优化方法内核 - muk : Optional[float] + muk : float 因子 - sigma : Optional[float] + sigma : float 罚函数因子 - alpha : Optional[float] + alpha : float 初始步长 - beta : Optional[float] + beta : float 修正参数 - p : Optional[float] + p : float 修正参数 - eta : Optional[float] + eta : float 常数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 diff --git a/optimtool/example/Lasso.py b/optimtool/example/Lasso.py index 53e236b..fa7f886 100644 --- a/optimtool/example/Lasso.py +++ b/optimtool/example/Lasso.py @@ -23,9 +23,9 @@ from .._convert import a2m from .._utils import 
get_value, plot_iteration -from .._typing import NDArray, ArgArray, PointArray, Optional, OutputType, DataType +from .._typing import NDArray, ArgArray, PointArray, OutputType, DataType -def gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, delta: Optional[float]=10, alp: Optional[float]=1e-3, epsilon: Optional[float]=1e-2, k: Optional[int]=0) -> OutputType: +def gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, delta: float=10, alp: float=1e-3, epsilon: float=1e-2, k: int=0) -> OutputType: ''' Parameters ---------- @@ -35,7 +35,7 @@ def gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, b : NDArray m*1维数 参数矩阵 - mu : Optional[float] + mu : float 正则化参数 args : ArgArray @@ -44,22 +44,22 @@ def gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - delta : Optional[float] + delta : float 常数 - alp : Optional[float] + alp : float 步长阈值 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -97,7 +97,7 @@ def gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, ''' 次梯度算法 ''' -def subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, alphak: Optional[float]=2e-2, epsilon: Optional[float]=1e-3, k: Optional[int]=0) -> OutputType: +def subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, alphak: float=2e-2, epsilon: float=1e-3, k: int=0) -> OutputType: ''' Parameters ---------- @@ -116,19 +116,19 @@ def subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArr x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - 
output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - alphak : Optional[float] + alphak : float 自适应步长参数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -164,7 +164,7 @@ def subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArr ''' 罚函数法 ''' -def penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, gamma: Optional[float]=0.01, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType: +def penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, gamma: float=0.01, epsilon: float=1e-6, k: int=0) -> OutputType: ''' Parameters ---------- @@ -183,19 +183,19 @@ def penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - gamma : Optional[float] + gamma : float 因子 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -224,7 +224,7 @@ def penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, ''' 近似点梯度法 ''' -def approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType: +def approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-4, k: int=0) -> OutputType: ''' Parameters ---------- @@ -243,16 +243,16 @@ def approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: Po x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 diff --git 
a/optimtool/example/WanYuan.py b/optimtool/example/WanYuan.py index e9fd927..2f58040 100644 --- a/optimtool/example/WanYuan.py +++ b/optimtool/example/WanYuan.py @@ -22,9 +22,9 @@ import numpy as np import matplotlib.pyplot as plt -from .._typing import FuncType, Tuple, FuncArray, ArgArray, Optional +from .._typing import FuncType, Tuple, FuncArray, ArgArray -def solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: Optional[bool]=False, eps: Optional[float]=1e-10) -> None: +def solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: bool=False, eps: float=1e-10) -> None: ''' Parameters ---------- @@ -52,7 +52,7 @@ def solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: fl x_0 : tuple 初始点:(x0, y0, x1, y2, x2, y2) - draw : Optional[bool] + draw : bool 绘图接口 diff --git a/optimtool/unconstrain/gradient_descent.py b/optimtool/unconstrain/gradient_descent.py index 27e0f3e..484c451 100644 --- a/optimtool/unconstrain/gradient_descent.py +++ b/optimtool/unconstrain/gradient_descent.py @@ -23,10 +23,10 @@ from .._utils import get_value, plot_iteration from .._convert import f2m, a2m, p2t -from .._typing import FuncArray, ArgArray, PointArray, Optional, DataType, OutputType +from .._typing import FuncArray, ArgArray, PointArray, DataType, OutputType # 梯度下降法 -def solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -39,16 +39,16 @@ def solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - epsilon : Optional[float] + 
epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -80,7 +80,7 @@ def solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool return (x_0, k, fx) if output_f is True else (x_0, k) # 最速下降法 -def steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -93,19 +93,19 @@ def steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[b x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 非精确线搜索方法 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -134,7 +134,7 @@ def steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[b return (x_0, k, fx) if output_f is True else (x_0, k) # Barzilar Borwein梯度下降法 -def barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="Grippo", c1: Optional[float]=0.6, beta: Optional[float]=0.6, alpha: Optional[float]=1, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="Grippo", c1: float=0.6, beta: float=0.6, alpha: float=1, epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -147,31 +147,31 @@ def barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Op x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + 
method : str 非单调线搜索方法:"Grippo"与"ZhangHanger" - M : Optional[int] + M : int 阈值 - c1 : Optional[float] + c1 : float 常数 - beta : Optional[float] + beta : float 常数 - alpha : Optional[float] + alpha : float 初始步长 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 diff --git a/optimtool/unconstrain/newton.py b/optimtool/unconstrain/newton.py index d74e5dd..22be9fd 100644 --- a/optimtool/unconstrain/newton.py +++ b/optimtool/unconstrain/newton.py @@ -22,10 +22,10 @@ from .._utils import get_value, plot_iteration from .._convert import f2m, a2m, p2t, h2h -from .._typing import FuncArray, ArgArray, PointArray, Optional, OutputType, DataType +from .._typing import FuncArray, ArgArray, PointArray, OutputType, DataType # 经典牛顿法 -def classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -38,16 +38,16 @@ def classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bo x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -77,7 +77,7 @@ def classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bo return (x_0, k, f) if output_f is True else (x_0, k) # 修正牛顿法 -def modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[int]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: int=20, epsilon: 
float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -90,22 +90,22 @@ def modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[b x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 单调线搜索方法:"armijo", "goldstein", "wolfe" - m : Optional[float] + m : float 海瑟矩阵条件数阈值 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -139,7 +139,7 @@ def modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[b return (x_0, k, f) if output_f is True else (x_0, k) # 非精确牛顿法 -def CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType: +def CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-6, k: int=0) -> OutputType: ''' Parameters ---------- @@ -152,19 +152,19 @@ def CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=T x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 单调线搜索方法:"armijo", "goldstein", "wolfe" - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 diff --git a/optimtool/unconstrain/newton_quasi.py b/optimtool/unconstrain/newton_quasi.py index 9b52e9e..7681ca2 100644 --- a/optimtool/unconstrain/newton_quasi.py +++ b/optimtool/unconstrain/newton_quasi.py @@ -22,10 +22,10 @@ from .._utils import get_value, plot_iteration from .._convert import f2m, a2m, p2t, h2h -from .._typing import FuncArray, ArgArray, PointArray, Optional, OutputType, DataType +from .._typing import FuncArray, ArgArray, PointArray, OutputType, DataType # BFGS拟牛顿法 -def bfgs(funcs: FuncArray, args: ArgArray, 
x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=20, epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -38,22 +38,22 @@ def bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool] x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 单调线搜索方法:"armijo", "goldstein", "wolfe" - m : Optional[float] + m : float 海瑟矩阵条件数阈值 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -92,7 +92,7 @@ def bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool] return (x_0, k, f) if output_f is True else (x_0, k) # DFP拟牛顿法 -def dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=20, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType: +def dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=20, epsilon: float=1e-4, k: int=0) -> OutputType: ''' Parameters ---------- @@ -105,22 +105,22 @@ def dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]= x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 单调线搜索方法:"armijo", "goldstein", "wolfe" - m : Optional[float] + m : float 海瑟矩阵条件数阈值 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -160,7 +160,7 @@ def dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]= return (x_0, 
k, f) if output_f is True else (x_0, k) # L_BFGS方法 -def L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", m: Optional[float]=6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", m: float=6, epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -173,22 +173,22 @@ def L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[boo x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 单调线搜索方法:"armijo", "goldstein", "wolfe" - m : Optional[float] + m : float 海瑟矩阵条件数阈值 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 diff --git a/optimtool/unconstrain/nonlinear_least_square.py b/optimtool/unconstrain/nonlinear_least_square.py index 3161077..d80115e 100644 --- a/optimtool/unconstrain/nonlinear_least_square.py +++ b/optimtool/unconstrain/nonlinear_least_square.py @@ -23,10 +23,10 @@ from .._convert import f2m, a2m, p2t, h2h from .._utils import get_value, plot_iteration -from .._typing import FuncArray, ArgArray, PointArray, Optional, OutputType, DataType +from .._typing import FuncArray, ArgArray, PointArray, OutputType, DataType # 高斯-牛顿法(非线性最小二乘问题) -def gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]="wolfe", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str="wolfe", epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -39,19 +39,19 @@ def gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, 
draw: Option x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - method : Optional[str] + method : str 单调线搜索方法:"armijo", "goldstein", "wolfe" - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 @@ -84,7 +84,7 @@ def gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Option return (x_0, k, f) if output_f is True else (x_0, k) # levenberg marquardt方法 -def levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, lamk: Optional[float]=1, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.9, gamma1: Optional[float]=0.7, gamma2: Optional[float]=1.3, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType: +def levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, m: float=100, lamk: float=1, eta: float=0.2, p1: float=0.4, p2: float=0.9, gamma1: float=0.7, gamma2: float=1.3, epsilon: float=1e-10, k: int=0) -> OutputType: ''' Parameters ---------- @@ -97,37 +97,37 @@ def levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - m : Optional[float] + m : float 海瑟矩阵条件数阈值 - lamk : Optional[float] + lamk : float 修正常数 - eta : Optional[float] + eta : float 常数 - p1 : Optional[float] + p1 : float 常数 - p2 : Optional[float] + p2 : float 常数 - gamma1 : Optional[float] + gamma1 : float 常数 - gamma2 : Optional[float] + gamma2 : float 常数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 diff --git a/optimtool/unconstrain/trust_region.py b/optimtool/unconstrain/trust_region.py index f46d897..0a18378 100644 --- a/optimtool/unconstrain/trust_region.py +++ b/optimtool/unconstrain/trust_region.py @@ -22,10 
+22,10 @@ from .._utils import plot_iteration from .._convert import f2m, a2m, p2t, h2h -from .._typing import FuncArray, ArgArray, PointArray, Optional, OutputType, DataType +from .._typing import FuncArray, ArgArray, PointArray, OutputType, DataType # 信赖域算法 -def steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, r0: Optional[float]=1, rmax: Optional[float]=2, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.6, gamma1: Optional[float]=0.5, gamma2: Optional[float]=1.5, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType: +def steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, m: float=100, r0: float=1, rmax: float=2, eta: float=0.2, p1: float=0.4, p2: float=0.6, gamma1: float=0.5, gamma2: float=1.5, epsilon: float=1e-6, k: int=0) -> OutputType: ''' Parameters ---------- @@ -38,40 +38,40 @@ def steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optiona x_0 : PointArray 初始迭代点 - draw : Optional[bool] + draw : bool 绘图接口参数 - output_f : Optional[bool] + output_f : bool 输出迭代函数值列表 - m : Optional[float] + m : float 海瑟矩阵条件数阈值 - r0 : Optional[float] + r0 : float 搜索半径起点 - rmax : Optional[float] + rmax : float 搜索最大半径 - eta : Optional[float] + eta : float 常数 - p1 : Optional[float] + p1 : float 常数 - p2 : Optional[float] + p2 : float 常数 - gamma1 : Optional[float] + gamma1 : float 常数 - gamma2 : Optional[float] + gamma2 : float 常数 - epsilon : Optional[float] + epsilon : float 迭代停机准则 - k : Optional[int] + k : int 迭代次数 diff --git a/setup.py b/setup.py index 4520a84..07b6c87 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ from setuptools import setup if sys.version_info < (3, 7, 0): - raise OSError(f'optimtool-2.4.0 requires Python >=3.7, but yours is {sys.version}') + raise OSError(f'optimtool-2.4.1 requires Python >=3.7, but yours is {sys.version}') if (3, 7, 0) <= sys.version_info 
< (3, 8, 0): # https://github.com/pypa/setuptools/issues/926#issuecomment-294369342 @@ -61,6 +61,7 @@ 'Tracker': 'https://github.com/linjing-lab/optimtool/issues', }, classifiers=[ + 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', diff --git a/tests/constrain/_constrain.ipynb.txt b/tests/constrain/_constrain.ipynb.txt index fc05465..9bc1689 100644 --- a/tests/constrain/_constrain.ipynb.txt +++ b/tests/constrain/_constrain.ipynb.txt @@ -53,8 +53,8 @@ "\n", "| 方法头 | 解释 |\n", "| ----------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |\n", - "| penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"gradient_descent\", sigma: Optional[float]=10, p: Optional[float]=2, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 增加二次罚项 |\n", - "| lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"gradient_descent\", lamk: Optional[float]=6, sigma: Optional[float]=10, p: Optional[float]=2, etak: Optional[float]=1e-4, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 增广拉格朗日乘子法 |" + "| penalty_quadratice(funcs: FuncArray, args: FuncArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"gradient_descent\", sigma: float=10, p: float=2, epsilon: float=1e-4, k: int=0) -> OutputType | 增加二次罚项 |\n", + "| lagrange_augmentede(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"gradient_descent\", lamk: float=6, sigma: float=10, p: float=2, etak: float=1e-4, epsilon: float=1e-6, k: int=0) -> OutputType | 增广拉格朗日乘子法 |" ] }, { 
@@ -103,9 +103,9 @@ "\n", "| 方法头 | 解释 |\n", "| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------- |\n", - "| penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"gradient_descent\", sigma: Optional[float]=10, p: Optional[float]=0.4, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 增加二次罚项 |\n", - "| penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"gradient_descent\", sigma: Optional[float]=12, p: Optional[float]=0.6, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 增加分式函数罚项 |\n", - "| lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"gradient_descent\", muk: Optional[float]=10, sigma: Optional[float]=8, alpha: Optional[float]=0.2, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-1, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 增广拉格朗日乘子法 |" + "| penalty_quadraticu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"gradient_descent\", sigma: float=10, p: float=0.4, epsilon: float=1e-10, k: int=0) -> OutputType | 增加二次罚项 |\n", + "| penalty_interior_fraction(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"gradient_descent\", sigma: float=12, p: float=0.6, epsilon: float=1e-6, k: int=0) -> OutputType | 增加分式函数罚项 |\n", + "| lagrange_augmentedu(funcs: FuncArray, args: ArgArray, cons: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: 
str=\"gradient_descent\", muk: float=10, sigma: float=8, alpha: float=0.2, beta: float=0.7, p: float=2, eta: float=1e-1, epsilon: float=1e-4, k: int=0) -> OutputType | 增广拉格朗日乘子法 |" ] }, { @@ -149,14 +149,14 @@ "## 混合等式约束(mixequal)\n", "\n", "```python\n", - "oc.mixequal.[函数名]([目标函数], [参数表], [等式约束表], [不等式约束表], [初始迭代点])\n", + "oc.mixequal.[函数名]([目标函数], [参数表], [等式约束表], [不等式约束表], [初始迭代点])\n", "```\n", "\n", "| 方法头 | 解释 |\n", "| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- |\n", - "| penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"gradient_descent\", sigma: Optional[float]=10, p: Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 增加二次罚项 |\n", - "| penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"gradient_descent\", sigma: Optional[float]=1, p: Optional[float]=0.6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | L1精确罚函数法 |\n", - "| lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"gradient_descent\", lamk: Optional[float]=6, muk: Optional[float]=10, sigma: Optional[float]=8, alpha: Optional[float]=0.5, beta: Optional[float]=0.7, p: Optional[float]=2, eta: Optional[float]=1e-3, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 增广拉格朗日乘子法 |" + "| penalty_quadraticm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, 
method: str=\"gradient_descent\", sigma: float=10, p: float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType | 增加二次罚项 |\n", + "| penalty_L1(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"gradient_descent\", sigma: float=1, p: float=0.6, epsilon: float=1e-10, k: int=0) -> OutputType | L1精确罚函数法 |\n", + "| lagrange_augmentedm(funcs: FuncArray, args: ArgArray, cons_equal: FuncArray, cons_unequal: FuncArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"gradient_descent\", lamk: float=6, muk: float=10, sigma: float=8, alpha: float=0.5, beta: float=0.7, p: float=2, eta: float=1e-3, epsilon: float=1e-4, k: int=0) -> OutputType | 增广拉格朗日乘子法 |" ] }, { diff --git a/tests/example/_example.ipynb.txt b/tests/example/_example.ipynb.txt index 713242c..3f4f2c2 100644 --- a/tests/example/_example.ipynb.txt +++ b/tests/example/_example.ipynb.txt @@ -44,10 +44,10 @@ "\n", "| 方法头 | 解释 |\n", "| ------------------------------------------------------------------------------------------------------- | ---------------- |\n", - "| gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, delta: Optional[float]=10, alp: Optional[float]=1e-3, epsilon: Optional[float]=1e-2, k: Optional[int]=0) -> OutputType | 光滑化Lasso函数法 |\n", - "| subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, alphak: Optional[float]=2e-2, epsilon: Optional[float]=1e-3, k: Optional[int]=0) -> OutputType | 次梯度法Lasso避免一阶不可导 |\n", - "| penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, gamma: Optional[float]=0.01, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 罚函数法 |\n", - "| approximate_point(A: NDArray, b: NDArray, mu: float, 
args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | 邻近算子更新 |" + "| gradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, delta: float=10, alp: float=1e-3, epsilon: float=1e-2, k: int=0) -> OutputType | 光滑化Lasso函数法 |\n", + "| subgradient(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, alphak: float=2e-2, epsilon: float=1e-3, k: int=0) -> OutputType | 次梯度法Lasso避免一阶不可导 |\n", + "| penalty(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, gamma: float=0.01, epsilon: float=1e-6, k: int=0) -> OutputType | 罚函数法 |\n", + "| approximate_point(A: NDArray, b: NDArray, mu: float, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-4, k: int=0) -> OutputType | 邻近算子更新 |" ] }, { @@ -112,7 +112,7 @@ "\n", "| 方法头 | 解释 |\n", "| --------------------------------------------------------------- | -------------------- |\n", - "| solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: Optional[bool]=False, eps: Optional[float]=1e-10) -> None | 使用高斯-牛顿方法求解构造的7个残差函数 |" + "| solution(m: float, n: float, a: float, b: float, c: float, x3: float, y3: float, x_0: tuple, draw: bool=False, eps: float=1e-10) -> None | 使用高斯-牛顿方法求解构造的7个残差函数 |" ] }, { diff --git a/tests/unconstrain/_unconstrain.ipynb.txt b/tests/unconstrain/_unconstrain.ipynb.txt index 34968f2..6b00561 100644 --- a/tests/unconstrain/_unconstrain.ipynb.txt +++ b/tests/unconstrain/_unconstrain.ipynb.txt @@ -55,9 +55,9 @@ "\n", "| 方法头 | 解释 |\n", "| ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |\n", - "| solve(funcs: FuncArray, args: ArgArray, x_0: 
PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 通过解方程的方式来求解精确步长 |\n", - "| steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"wolfe\", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 使用线搜索方法求解非精确步长(默认使用wolfe线搜索) |\n", - "| barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"Grippo\", c1: Optional[float]=0.6, beta: Optional[float]=0.6, alpha: Optional[float]=1, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 使用Grippo与ZhangHanger提出的非单调线搜索方法更新步长 |" + "| solve(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType | 通过解方程的方式来求解精确步长 |\n", + "| steepest(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"wolfe\", epsilon: float=1e-10, k: int=0) -> OutputType | 使用线搜索方法求解非精确步长(默认使用wolfe线搜索) |\n", + "| barzilar_borwein(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"Grippo\", c1: float=0.6, beta: float=0.6, alpha: float=1, epsilon: float=1e-10, k: int=0) -> OutputType | 使用Grippo与ZhangHanger提出的非单调线搜索方法更新步长 |" ] }, { @@ -104,9 +104,9 @@ "\n", "| 方法头 | 解释 |\n", "| ----------------------------------------------------------------------------------------------- | --------------------------------- |\n", - "| classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 通过直接对目标函数二阶导矩阵(海瑟矩阵)进行求逆来获取下一步的步长 |\n", - "| modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: 
Optional[str]=\"wolfe\", m: Optional[int]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 修正当前海瑟矩阵保证其正定性(目前只接入了一种修正方法) |\n", - "| CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"wolfe\", epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 采用牛顿-共轭梯度法求解梯度(非精确牛顿法的一种) |" + "| classic(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, epsilon: float=1e-10, k: int=0) -> OutputType | 通过直接对目标函数二阶导矩阵(海瑟矩阵)进行求逆来获取下一步的步长 |\n", + "| modified(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"wolfe\", m: int=20, epsilon: float=1e-10, k: int=0) -> OutputType | 修正当前海瑟矩阵保证其正定性(目前只接入了一种修正方法) |\n", + "| CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"wolfe\", epsilon: float=1e-6, k: int=0) -> OutputType | 采用牛顿-共轭梯度法求解梯度(非精确牛顿法的一种) |" ] }, { @@ -153,9 +153,9 @@ "\n", "| 方法头 | 解释 |\n", "| -------------------------------------------------------------------------------------------- | --------------- |\n", - "| bfgs(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"wolfe\", m: Optional[float]=20, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | BFGS方法更新海瑟矩阵 |\n", - "| dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"wolfe\", m: Optional[float]=20, epsilon: Optional[float]=1e-4, k: Optional[int]=0) -> OutputType | DFP方法更新海瑟矩阵 |\n", - "| L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"wolfe\", m: Optional[float]=6, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 双循环方法更新BFGS海瑟矩阵 |\n" + "| bfgs(funcs: FuncArray, args: 
ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"wolfe\", m: float=20, epsilon: float=1e-10, k: int=0) -> OutputType | BFGS方法更新海瑟矩阵 |\n", + "| dfp(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"wolfe\", m: float=20, epsilon: float=1e-4, k: int=0) -> OutputType | DFP方法更新海瑟矩阵 |\n", + "| L_BFGS(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"wolfe\", m: float=6, epsilon: float=1e-10, k: int=0) -> OutputType | 双循环方法更新BFGS海瑟矩阵 |\n" ] }, { @@ -202,8 +202,8 @@ "\n", "| 方法头 | 解释 |\n", "| ---------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- |\n", - "| gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, method: Optional[str]=\"wolfe\", epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | 高斯-牛顿提出的方法框架,包括OR分解等操作 |\n", - "| levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, lamk: Optional[float]=1, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.9, gamma1: Optional[float]=0.7, gamma2: Optional[float]=1.3, epsilon: Optional[float]=1e-10, k: Optional[int]=0) -> OutputType | Levenberg Marquardt提出的方法框架 |" + "| gauss_newton(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, method: str=\"wolfe\", epsilon: float=1e-10, k: int=0) -> OutputType | 高斯-牛顿提出的方法框架,包括QR分解等操作 |\n", + "| levenberg_marquardt(funcr: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, m: float=100, lamk: float=1, eta: float=0.2, p1: float=0.4, p2: float=0.9, gamma1: float=0.7, gamma2: float=1.3, epsilon: float=1e-10, k: int=0) -> OutputType | Levenberg Marquardt提出的方法框架 
|" ] }, { @@ -255,7 +255,7 @@ "\n", "| 方法头 | 解释 |\n", "| ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- |\n", - "| steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: Optional[bool]=True, output_f: Optional[bool]=False, m: Optional[float]=100, r0: Optional[float]=1, rmax: Optional[float]=2, eta: Optional[float]=0.2, p1: Optional[float]=0.4, p2: Optional[float]=0.6, gamma1: Optional[float]=0.5, gamma2: Optional[float]=1.5, epsilon: Optional[float]=1e-6, k: Optional[int]=0) -> OutputType | 截断共轭梯度法在此方法中被用于搜索步长 |" + "| steihaug_CG(funcs: FuncArray, args: ArgArray, x_0: PointArray, draw: bool=True, output_f: bool=False, m: float=100, r0: float=1, rmax: float=2, eta: float=0.2, p1: float=0.4, p2: float=0.6, gamma1: float=0.5, gamma2: float=1.5, epsilon: float=1e-6, k: int=0) -> OutputType | 截断共轭梯度法在此方法中被用于搜索步长 |" ] }, {