Commit 94e6ee52 authored by Dr.李

merge 2 implementations

parent 2c94271e
@@ -15,19 +15,18 @@ def linear_build(er: np.ndarray,
                  lbound: Union[np.ndarray, float],
                  ubound: Union[np.ndarray, float],
                  risk_constraints: np.ndarray,
-                 risk_target: Tuple[np.ndarray, np.ndarray]) -> Tuple[str, np.ndarray, np.ndarray]:
+                 risk_target: Tuple[np.ndarray, np.ndarray],
+                 turn_over_target: float = None,
+                 current_position: np.ndarray = None) -> Tuple[str, np.ndarray, np.ndarray]:
     er = er.flatten()
     n, m = risk_constraints.shape
 
     if not risk_target:
-        risk_lbound = -np.inf * np.ones(m)
-        risk_ubound = np.inf * np.ones(m)
-        cons_matrix = np.concatenate((risk_constraints.T, risk_lbound.reshape((-1, 1)), risk_ubound.reshape((-1, 1))),
-                                     axis=1)
+        risk_lbound = -np.inf * np.ones((m, 1))
+        risk_ubound = np.inf * np.ones((m, 1))
     else:
-        cons_matrix = np.concatenate(
-            (risk_constraints.T, risk_target[0].reshape((-1, 1)), risk_target[1].reshape((-1, 1))),
-            axis=1)
+        risk_lbound = risk_target[0].reshape((-1, 1))
+        risk_ubound = risk_target[1].reshape((-1, 1))
 
     if isinstance(lbound, float):
         lbound = np.ones(n) * lbound
@@ -35,6 +34,8 @@ def linear_build(er: np.ndarray,
     if isinstance(ubound, float):
         ubound = np.ones(n) * ubound
 
+    if not turn_over_target:
+        cons_matrix = np.concatenate((risk_constraints.T, risk_lbound, risk_ubound), axis=1)
     opt = LPOptimizer(cons_matrix, lbound, ubound, -er)
 
     status = opt.status()
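Aside: the np.concatenate call added above shows the layout of the cons_matrix handed to LPOptimizer: one row per linear constraint, holding the n exposure coefficients followed by that constraint's lower and upper bound. A minimal sketch of that layout, not part of the commit, using an assumed fully-invested constraint (sum of weights equal to 1) over three assets:

import numpy as np

n = 3
risk_constraints = np.ones((n, 1))   # n x 1 exposure matrix: every asset loads 1 on the constraint
risk_lbound = np.ones((1, 1))        # lower bound on the exposure
risk_ubound = np.ones((1, 1))        # upper bound on the exposure

# same construction as in the diff: [exposures | lower | upper] per row
cons_matrix = np.concatenate((risk_constraints.T, risk_lbound, risk_ubound), axis=1)
# cons_matrix -> [[1. 1. 1. 1. 1.]]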
@@ -43,31 +44,8 @@ def linear_build(er: np.ndarray,
         status = 'optimal'
 
     return status, opt.feval(), opt.x_value()
 
-
-def linear_build_with_to_constraint(er: np.ndarray,
-                                    lbound: Union[np.ndarray, float],
-                                    ubound: Union[np.ndarray, float],
-                                    risk_constraints: np.ndarray,
-                                    risk_target: Tuple[np.ndarray, np.ndarray],
-                                    turn_over_target: float,
-                                    current_position: np.ndarray):
-    er = er.flatten()
-    current_position = current_position.reshape((-1, 1))
-    n, m = risk_constraints.shape
-
-    if not risk_target:
-        risk_lbound = -np.inf * np.ones((m, 1))
-        risk_ubound = np.inf * np.ones((m, 1))
-    else:
-        risk_lbound = risk_target[0].reshape((-1, 1))
-        risk_ubound = risk_target[1].reshape((-1, 1))
-
-    if isinstance(lbound, float):
-        lbound = np.ones(n) * lbound
-
-    if isinstance(ubound, float):
-        ubound = np.ones(n) * ubound
-
+    current_position = current_position.reshape((-1, 1))
 
     # we need to expand bounded condition and constraint matrix to handle L1 bound
     lbound = np.concatenate((lbound, np.zeros(n)), axis=0)
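Aside: the "L1 bound" comment above refers to the standard LP trick for the turnover constraint sum_i |w_i - w0_i| <= turn_over_target, which is not linear in w. A sketch of the linearization being set up; only the lbound line appears in the visible part of the diff, the ubound expansion and the extra inequality rows are assumptions about the truncated remainder:

import numpy as np

n = 4
lbound = np.zeros(n)            # original bounds on the weights w
ubound = 0.01 * np.ones(n)

# introduce auxiliary variables t with t_i >= |w_i - w0_i|;
# the LP variable vector becomes x = [w, t], so the bounds double in length
lbound = np.concatenate((lbound, np.zeros(n)), axis=0)          # line shown in the diff
ubound = np.concatenate((ubound, np.inf * np.ones(n)), axis=0)  # assumed counterpart

# the turnover requirement then becomes linear constraints on x:
#     w_i - t_i <= w0_i,    -w_i - t_i <= -w0_i,    sum_i t_i <= turn_over_target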
@@ -122,7 +100,7 @@ if __name__ == '__main__':
     risk_lbound = np.ones(1)
     risk_ubound = np.ones(1)
 
-    status, fvalue, x_values = linear_build_with_to_constraint(er,
+    status, fvalue, x_values = linear_build(er,
                                                                 lb,
                                                                 ub,
                                                                 cons,
......
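For reference, a minimal usage sketch of the merged interface. The names er, cons, lb, ub, risk_lbound, risk_ubound follow the __main__ block above; the problem size, starting position and the 20% turnover budget are illustrative assumptions, not values from the commit:

import numpy as np
from alphamind.portfolio.linearbuilder import linear_build

n = 300
er = np.random.randn(n)
cons = np.ones((n, 1))                  # single constraint column: fully invested
risk_lbound = np.ones(1)
risk_ubound = np.ones(1)
lb, ub = 0., 0.01

# plain build, no turnover control
status, fvalue, x_values = linear_build(er, lb, ub, cons, (risk_lbound, risk_ubound))

# same entry point with the optional turnover arguments merged in by this commit
current_pos = np.ones(n) / n            # illustrative starting portfolio
status, fvalue, x_values = linear_build(er, lb, ub, cons, (risk_lbound, risk_ubound),
                                        turn_over_target=0.2,
                                        current_position=current_pos)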
@@ -8,7 +8,6 @@ Created on 2017-5-5
 import unittest
 import numpy as np
 from alphamind.portfolio.linearbuilder import linear_build
-from alphamind.portfolio.linearbuilder import linear_build_with_to_constraint
 
 
 class TestLinearBuild(unittest.TestCase):
@@ -76,7 +75,7 @@ class TestLinearBuild(unittest.TestCase):
         risk_lbound[:-1] = risk_lbound[:-1] - risk_tolerance
         risk_ubound[:-1] = risk_ubound[:-1] + risk_tolerance
 
-        status, _, w = linear_build_with_to_constraint(self.er,
+        status, _, w = linear_build(self.er,
                                                         0.,
                                                         0.01,
                                                         self.risk_exp,
......
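The updated test above exercises the turnover path through the single linear_build entry point. A property worth checking in such a test, sketched here with hypothetical names since the actual fixture values sit in the truncated part of the diff, is that the realized turnover of the returned weights respects the target:

import numpy as np

def assert_turnover(w: np.ndarray, current_pos: np.ndarray, turn_over_target: float) -> None:
    # realized turnover is the L1 distance between the new and the current weights
    realized = np.abs(w.flatten() - current_pos.flatten()).sum()
    assert realized <= turn_over_target + 1e-8, realized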