from pylab import *
from casadi import *

# Ex 1.1
x = SX.sym("x")
y = sin(x)
z = y/x+y
f = Function("f",[x],[z])
print(f(0.01)) # 1.00998

# Ex 1.2
y = sin(x).printme(0)
z = y/x+y
f = Function("f",[x],[z])
f(0.01)
# |> 0 : 9.9998333341666645e-03
# The 0 is the number you passed to printme.

# Ex 1.3
x = MX.sym("x")
y = sqrt(x.attachAssert(x>=0, "That's not allowed"))
f = Function("f",[x],[y])
try:
    f(-3)
except Exception as e:
    print(e)

# Ex 1.4
f = Function("f",[x],[jacobian(y,x)])
try:
    f(-3)
except Exception as e:
    print(e)
# The error is still raised

# Ex 1.5
x = MX.sym("x")
y = if_else(x<=1,x**2,-0.5*x**2+3*x-1.5)
f = Function('f',[x],[y])
xs = np.linspace(-4,5)
ys = f(xs)
plot(xs,ys)
show()

# Ex 1.6
x = MX.sym("x")
y = if_else(x<=1,(x**2).monitor("true-clause"),(-0.5*x**2+3*x-1.5).monitor("false-clause"))
f = Function('f',[x],[y])
f(3)
# Both the true and the false clause are evaluated.

# Ex 1.7
x = MX.sym("x")
y = if_else(x<=1,(x**2).monitor("true-clause"),(-0.5*x**2+3*x-1.5).monitor("false-clause"),True)
f = Function('f',[x],[y])
f(3) # Only the false-clause is evaluated
f(0) # Only the true-clause is evaluated

# Ex 2.1
opti = Opti()
x = opti.variable()
opti.subject_to(sqrt(x)==0.3)
opti.set_initial(x, 3)
opti.solver("ipopt",{"monitor":["nlp_g"]})
opti.solve()
# You can see that x takes negative values:
# Ipopt proposes a step that makes x negative.
# After the failure (NaN computed), the line search kicks in, reducing the step size so that x remains positive.

# Ex 2.2
DM.set_precision(16)
opti.solve()

# Ex 2.3
opti.solver("ipopt",{"monitor":["nlp_g"],"show_eval_warnings":False})
opti.solve()

# Ex 2.4
opti = Opti()
x = opti.variable(2, 2)
opti.subject_to(sqrt((x-1).printme(0))==0.3)
opti.set_initial(x, 3)
opti.solver("ipopt",{"show_eval_warnings":False})
opti.solve()

opti = Opti()
x = opti.variable(2, 2)
opti.subject_to(sqrt((x-1).monitor("under_sqrt"))==0.3)
opti.set_initial(x, 3)
opti.solver("ipopt",{"show_eval_warnings":False})
opti.solve()
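
# Extra (not part of the original exercise sheet): a minimal sketch of another Opti
# debugging aid, opti.debug.value, which reports what an expression built from the
# problem variables evaluates to at the latest iterate; it complements printme/monitor
# when tracking down where a NaN enters.
print(opti.debug.value(x))          # the 2x2 variable at the latest iterate
print(opti.debug.value(sqrt(x-1)))  # arbitrary expressions can be queried too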
# Ex 2.5
opti = Opti()
x = opti.variable()
y = opti.variable()
z = opti.variable()
opti.minimize(x**2 + 100*z**2)
opti.subject_to(z == y - (1-x)**2)
opti.set_initial(x, 2.5)
opti.set_initial(y, 3.0)
opti.set_initial(z, 0.75)
options = {}
options["specific_options"] = dict(nlp_hess_l=dict(final_options=dict(print_in=True,print_out=True)))
opti.solver("ipopt",options)
sol = opti.solve()
print(sol.value(vertcat(x,y,z))) # 0 1 0
# The ideal objective achievable with a quadratic cost is zero,
# which is indeed achieved here with x=0, z=0.
# The constraint is not 'in the way' of achieving this.
# You could say it is 'inactive' (even though that term is more commonly applied to inequalities).
# Its associated multiplier will be zero -> the Hessian of the Lagrangian is here the same as the Hessian of the objective:
# -> you expect the Hessian diag(2, 0, 200).
# In reality, you get:
# [[1.333333333333333, 00, 00],
#  [00, 00, 00],
#  [00, 00, 133.3333333333333]]
ratio = 4.8569874206106939e-55/7.2854811309160408e-55 # 0.666666666, which matches "Input 2 (lam_f)" in the printed output
# -> lam_f is a scaling factor for the objective contribution to the Hessian of the Lagrangian.

# Ex 2.6
solver = opti.debug.casadi_solver
nlp_hess_l = solver.get_function("nlp_hess_l")
nlp_hess_l(vertcat(0,1,0),[],1,0) # diag(2,0,200)
nlp_hess_l.disp(True)

# Ex 2.7
print(nlp_hess_l)
args = []
for i in range(nlp_hess_l.n_in()):
    args.append(SX.sym(nlp_hess_l.name_in(i), nlp_hess_l.sparsity_in(i)))
print(nlp_hess_l(*args))
# diag(2*lam_f+2*lam_g,0,200*lam_f)
# A whole lot simpler than 'nlp_hess_l.disp(True)'.
# SX gives more opportunity to simplify in this case.
# The 2*100 in the SX output is peculiar. I've opened an issue.

# Ex 2.8
options = {}
options["qpsol"] = "qrqp"
options["qpsol_options"] = dict(print_in=True,print_out=True)
opti.solver("sqpmethod",options)
opti.solve()

# Ex 2.9
options = {}
options["qpsol"] = "qrqp"
options["qpsol_options"] = dict(print_problem=True)
opti.solver("sqpmethod",options)
opti.solve()

# Ex 2.10
options["qpsol"] = "qrqp"
options["qpsol_options"] = dict(dump_in=True)
opti.solver("sqpmethod",options)
opti.solve()
h = DM.from_file("qpsol.000001.in.h.mtx")
print("h",h)

# Ex 2.11
options = {}
options["qpsol"] = "qrqp"
options["qpsol_options"] = dict(dump_in=True,dump_format="txt")
opti.solver("sqpmethod",options)
opti.solve()
h = DM.from_file("qpsol.000001.in.h.txt")
print("h",h)
with open("qpsol.000001.in.h.mtx") as f:
    print("mtx:")
    print(f.read())
with open("qpsol.000001.in.h.txt") as f:
    print("txt:")
    print(f.read())
# txt is easier for a human to interpret, but wastes a lot of disk space when big sparse matrices are involved.

# Ex 2.12
options = {}
options["qpsol"] = "qrqp"
options["qpsol_options"] = dict(dump_in=True,dump=True,dump_format="txt")
opti.solver("sqpmethod",options)
opti.solve()
qpsol = Function.load("qpsol.casadi")
# It's a QP solver!
print(qpsol)
args = qpsol.generate_in("qpsol.000000.in.txt")
print(args)
qpsol(*args)
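
# Extra check (not part of the original exercise sheet): the reloaded qpsol behaves like
# any other CasADi Function, so its outputs for the dumped inputs can be printed by name.
res = qpsol.call(list(args))
for name, value in zip(qpsol.name_out(), res):
    print(name, value)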