import numpy as np

# Objective coefficients (minimize: 3*x1 + 2*x2 - 4*x3).
c = np.array([3, 2, -4])

# Inequality constraints, expressed as A @ x >= b.
A = np.array([[5, -1, 3],
              [-4, 2, 5],
              [2, 5, -6]])
b = np.array([8, 4, 5])

# Per-variable lower bounds: each x_i must stay non-negative.
x_bounds = np.array([0, 0, 0])

# Starting iterate and fixed step size for the projected updates below.
x = np.array([1, 1, 1], dtype=float)  # initial guess
alpha = 0.1  # step size
def objective(x, coeffs=None):
    """Evaluate the linear objective coeffs . x.

    Parameters
    ----------
    x : array_like
        Point at which to evaluate the objective.
    coeffs : array_like, optional
        Objective coefficients. Defaults to the module-level ``c``,
        preserving the original call signature ``objective(x)``.

    Returns
    -------
    numpy scalar
        The dot product of the coefficient vector with ``x``.
    """
    if coeffs is None:
        coeffs = c  # backward-compatible fallback to the global vector
    return np.dot(coeffs, x)
def gradient(A, b, x):
    """Return ``A.T @ inv(A @ A.T) @ (A @ x - b)``.

    This is the minimum-norm direction that moves ``x`` toward the
    affine set ``A @ x = b``. NOTE(review): it does not involve the
    objective coefficients at all, so the surrounding loop only drives
    the iterate toward the constraints — confirm that is intended.
    """
    residual = A @ x - b
    correction = np.linalg.solve(A @ A.T, residual)
    return A.T @ correction
# Projected fixed-step iteration: step against the constraint residual
# direction, then clip the iterate back onto the non-negative orthant.
max_iterations = 1000
tolerance = 1e-6

for _ in range(max_iterations):
    step = alpha * gradient(A, b, x)
    candidate = np.maximum(x - step, x_bounds)  # enforce x >= 0
    # Stop once an update moves the iterate by less than the tolerance.
    if np.linalg.norm(candidate - x) < tolerance:
        break
    x = candidate
# Report the final iterate. The check below verifies the inequality
# constraints A @ x >= b; the non-negativity bounds were enforced inside
# the loop by the projection step.
# NOTE(review): reaching feasibility does not by itself make x optimal
# for the objective c . x — confirm the intended algorithm.
if np.all(A @ x >= b):
    print("Optimal solution found:")
    print("x1 =", x[0])
    print("x2 =", x[1])
    print("x3 =", x[2])
    print("Optimal value:", objective(x))
else:
    # Bug fix: a final iterate that violates A @ x >= b is *infeasible*,
    # which is unrelated to unboundedness. The old message
    # ("The problem is unbounded.") was incorrect terminology.
    print("No feasible solution found within the iteration limit.")
aW1wb3J0IG51bXB5IGFzIG5wCgojIENvZWZmaWNpZW50cyBvZiB0aGUgb2JqZWN0aXZlIGZ1bmN0aW9uIChNaW5pbWl6ZTogM3gxICsgMngyIC0gNHgzKQpjID0gbnAuYXJyYXkoWzMsIDIsIC00XSkKCiMgQ29lZmZpY2llbnRzIG9mIHRoZSBpbmVxdWFsaXR5IGNvbnN0cmFpbnRzIChBeCA+PSBiKQpBID0gbnAuYXJyYXkoW1s1LCAtMSwgM10sCiAgICAgICAgICAgICAgWy00LCAyLCA1XSwKICAgICAgICAgICAgICBbMiwgNSwgLTZdXSkKYiA9IG5wLmFycmF5KFs4LCA0LCA1XSkKCiMgQm91bmRzIGZvciB2YXJpYWJsZXMgKHgxLCB4MiwgeDMgPj0gMCkKeF9ib3VuZHMgPSBucC5hcnJheShbMCwgMCwgMF0pCgojIEluaXRpYWxpemUgdmFyaWFibGVzIHdpdGggYSBmZWFzaWJsZSBzdGFydGluZyBwb2ludAp4ID0gbnAuYXJyYXkoWzEsIDEsIDFdLCBkdHlwZT1mbG9hdCkgICMgSW5pdGlhbCBndWVzcwphbHBoYSA9IDAuMSAgIyBTdGVwIHNpemUKCmRlZiBvYmplY3RpdmUoeCk6CiAgICByZXR1cm4gbnAuZG90KGMsIHgpCgpkZWYgZ3JhZGllbnQoQSwgYiwgeCk6CiAgICByZXR1cm4gbnAuZG90KEEuVCwgbnAubGluYWxnLnNvbHZlKEEgQCBBLlQsIEEgQCB4IC0gYikpCgojIEl0ZXJhdGl2ZSBvcHRpbWl6YXRpb24gdXNpbmcgZ3JhZGllbnQgZGVzY2VudC1saWtlIGFwcHJvYWNoCm1heF9pdGVyYXRpb25zID0gMTAwMAp0b2xlcmFuY2UgPSAxZS02Cgpmb3IgaSBpbiByYW5nZShtYXhfaXRlcmF0aW9ucyk6CiAgICBncmFkID0gZ3JhZGllbnQoQSwgYiwgeCkKICAgIHhfbmV3ID0geCAtIGFscGhhICogZ3JhZAogICAgeF9uZXcgPSBucC5tYXhpbXVtKHhfbmV3LCB4X2JvdW5kcykgICMgRW5zdXJlIG5vbi1uZWdhdGl2aXR5CiAgICAKICAgIGlmIG5wLmxpbmFsZy5ub3JtKHhfbmV3IC0geCkgPCB0b2xlcmFuY2U6CiAgICAgICAgYnJlYWsKICAgIHggPSB4X25ldwoKIyBPdXRwdXQgcmVzdWx0cwppZiBucC5hbGwoQSBAIHggPj0gYik6CiAgICBwcmludCgiT3B0aW1hbCBzb2x1dGlvbiBmb3VuZDoiKQogICAgcHJpbnQoIngxID0iLCB4WzBdKQogICAgcHJpbnQoIngyID0iLCB4WzFdKQogICAgcHJpbnQoIngzID0iLCB4WzJdKQogICAgcHJpbnQoIk9wdGltYWwgdmFsdWU6Iiwgb2JqZWN0aXZlKHgpKQplbHNlOgogICAgcHJpbnQoIlRoZSBwcm9ibGVtIGlzIHVuYm91bmRlZC4iKQo=