Below is my code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import minimize

def objective(x):
    x1 = x[0]
    x2 = x[1]
    x3 = x[2]
    x4 = x[3]
    return x1*x4*(x1+x2+x3)+x3

def const1(x):
    return x[0]*x[1]*x[2]*x[3]-25

def const2(x):
    sum_sq = 40
    for i in range(4):
        sum_sq = sum_sq - x[i]**2
    return sum_sq

x0 = [1,5,5,1]
b = (1,5)
bnds = (b,b,b,b)
cons1 = {'type':'ineq','fun':const1}
cons2 = {'type':'eq','fun':const2}
cons = [cons1,cons2]
sol = minimize(objective,x0,method='SLSQP',bounds=bnds,constraints=cons)
print(sol)
I have defined my bounds here between 1 and 5 (b = (1,5)). But what if I wanted each of the four variables to take only specific values, i.e. 3, 4 or 5? Is that possible?
This is an example of MINLP (mixed-integer nonlinear programming). To my knowledge the only scipy optimization method that supports this is differential_evolution.
Your current sum_sq constraint will fail if you strictly enforce that it's equal to 40: with each variable restricted to {3, 4, 5}, the squares are 9, 16 or 25, and no four of them sum to exactly 40 (the closest attainable sum is 43). The equality needs to be loosened for the integer solution.
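A quick enumeration confirms this:

from itertools import product

# All attainable values of sum(x_i**2) with each x_i in {3, 4, 5}
sums = sorted({sum(v*v for v in combo) for combo in product((3, 4, 5), repeat=4)})
print(sums)                             # [36, 43, 50, 52, 57, 59, ...]: 40 is absent
print(min(abs(s - 40) for s in sums))   # 3, which is why eq_epsilon=3 is used below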
Provide your Jacobians, fix your bounds (you have two conflicting lower bounds: 1 in your code and 3 in your question), and fix your initial values so that they're compatible with those bounds, and it works fine. Because the allowed values 3, 4 and 5 are consecutive integers, bounds of (3, 5) combined with integrality describe them exactly:
import numpy as np
from scipy.optimize import check_grad, differential_evolution, minimize, NonlinearConstraint

def objective(x: np.ndarray) -> float | np.ndarray:
    x1, x2, x3, x4 = x
    return x1*x4*(x1 + x2 + x3) + x3

def jac_obj(x: np.ndarray) -> tuple[float | np.ndarray, ...]:
    x1, x2, x3, x4 = x
    return (
        x4*(x1 + x2 + x3) + x1*x4,
        x1*x4,
        x1*x4 + 1,
        x1*(x1 + x2 + x3),
    )

def const_min_prod(x: np.ndarray) -> float | np.ndarray:
    return x.prod(axis=0, keepdims=True)

def jac_min_prod(x: np.ndarray) -> tuple[float | np.ndarray, ...]:
    x1, x2, x3, x4 = x
    return (
        x2*x3*x4,
        x1   *x3*x4,
        x1*x2   *x4,
        x1*x2*x3,
    )

def const_sum_sq(x: np.ndarray) -> float | np.ndarray:
    return (x*x).sum(axis=0, keepdims=True)

def jac_sum_sq(x: np.ndarray) -> np.ndarray:
    return 2*x

x0 = np.array((
    3,
    np.sqrt(40 - 3*9),  # sumsq feasibility
    3, 3,
))
bounds = ((3, 5),) * x0.size

def constraints(eq_epsilon: float = 0) -> tuple[NonlinearConstraint, ...]:
    # eq_epsilon loosens the sum-of-squares equality; it must be nonzero in the
    # integer case, where no combination of squares hits exactly 40.
    return (
        NonlinearConstraint(
            fun=const_min_prod, lb=25, ub=np.inf, jac=jac_min_prod,
        ),
        NonlinearConstraint(
            fun=const_sum_sq, lb=40 - eq_epsilon, ub=40 + eq_epsilon, jac=jac_sum_sq,
        ),
    )

# Sanity-check the analytic gradients against finite differences
assert check_grad(x0=x0, func=objective,      grad=jac_obj)      < 1e-4
assert check_grad(x0=x0, func=const_min_prod, grad=jac_min_prod) < 1e-4
assert check_grad(x0=x0, func=const_sum_sq,   grad=jac_sum_sq)   < 1e-4

def continuous_optimize():
    sol = minimize(
        fun=objective, method='SLSQP', jac=jac_obj,
        x0=x0, bounds=bounds, constraints=constraints(),
    )
    print('Continuous:')
    assert sol.success, sol.message
    print(sol, end='\n\n')

def discrete_optimize():
    sol = differential_evolution(
        func=objective, vectorized=True, integrality=True,
        x0=x0, bounds=bounds, constraints=constraints(eq_epsilon=3),
    )
    print('Discrete:')
    assert sol.success, sol.message
    print(sol)

if __name__ == '__main__':
    continuous_optimize()
    discrete_optimize()
Continuous:
 message: Optimization terminated successfully
 success: True
  status: 0
     fun: 89.44996147917591
       x: [ 3.000e+00  3.606e+00  3.000e+00  3.000e+00]
     nit: 1
     jac: [ 3.782e+01  9.000e+00  1.000e+01  2.882e+01]
    nfev: 1
    njev: 1

Discrete:
          message: Optimization terminated successfully.
          success: True
              fun: 93.0
                x: [ 3.000e+00  4.000e+00  3.000e+00  3.000e+00]
              nit: 19
             nfev: 20
           constr: [array([ 0.000e+00]), array([ 0.000e+00])]
 constr_violation: 0.0
            maxcv: 0.0
This will need (a lot of) tuning of the various differential_evolution parameters if and when your problem changes.
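For illustration, these are the parameters I would look at first; the values below are placeholder guesses, not tuned recommendations:

# Illustrative only: every value here is a guess, not a tuned recommendation.
sol = differential_evolution(
    func=objective, x0=x0, bounds=bounds,
    constraints=constraints(eq_epsilon=3),
    integrality=True, vectorized=True,
    popsize=30,           # population size multiplier (default 15)
    mutation=(0.5, 1.0),  # dithered mutation constant (default (0.5, 1))
    recombination=0.9,    # crossover probability (default 0.7)
    maxiter=2000,         # iteration budget (default 1000)
    tol=1e-6,             # convergence tolerance (default 0.01)
    seed=0,               # fix the RNG so runs are reproducible
)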