Commit 60b2ca4a authored by iLampard

Merge remote-tracking branch 'origin/master'

parents c6d69b4f fbb26530
......@@ -20,6 +20,16 @@
**Alpha-Mind** is a multi-factor equity research framework developed in **Python**.
## Dependencies
The project has two main external GitHub dependencies:
* [portfolio-optimizer](https://github.com/alpha-miner/portfolio-optimizer): an optimizer toolkit for portfolio allocation, written by the same author;
* [xgboost](https://github.com/dmlc/xgboost): the library underlying some of the machine learning models in alpha-mind.
Both libraries are bundled into the alpha-mind repository as git submodules.
## Features
alpha-mind provides the tool chain commonly used in multi-factor research, including:
......
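As a quick orientation for the feature set described above, here is a minimal sketch using only the entry points this commit itself exercises (SqlEngine and Universe from alphamind.api); the connection string is a placeholder, not a real deployment:

```python
# Minimal sketch, using only entry points that appear elsewhere in this commit.
from alphamind.api import SqlEngine, Universe

data_source = 'postgres+psycopg2://user:password@localhost/alpha'  # placeholder DSN
engine = SqlEngine(data_source)            # database-backed factor and market data access
universe = Universe('custom', ['zz500'])   # stock universe definition, as used below
```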
# -*- coding: utf-8 -*-
"""
Created on 2017-5-17
@author: cheng.li
"""
......@@ -33,7 +33,7 @@ def cs_impl(ref_date,
total_risk_exp = total_data[constraint_risk]
er = total_data[[factor_name]].values.astype(float)
er = factor_processing(er, [winsorize_normal, standardize], total_risk_exp.values, [winsorize_normal, standardize]).flatten()
er = factor_processing(er, [winsorize_normal, standardize], total_risk_exp.values, [standardize]).flatten()
industry = total_data.industry_name.values
codes = total_data.code.tolist()
......@@ -46,7 +46,10 @@ def cs_impl(ref_date,
total_risk_exp = target_pos[constraint_risk]
activate_weight = target_pos['weight'].values
excess_return = np.exp(target_pos[['dx']].values) - 1.
excess_return = factor_processing(excess_return, [winsorize_normal, standardize], total_risk_exp.values, [winsorize_normal, standardize]).flatten()
excess_return = factor_processing(excess_return,
[winsorize_normal, standardize],
total_risk_exp.values,
[winsorize_normal, standardize]).flatten()
port_ret = np.log(activate_weight @ excess_return + 1.)
ic = np.corrcoef(excess_return, activate_weight)[0, 1]
x = sm.add_constant(activate_weight)
......@@ -76,10 +79,7 @@ def cross_section_analysis(ref_date,
if __name__ == '__main__':
import numpy as np
import pandas as pd
import statsmodels.api as sm
from alphamind.api import *
from alphamind.api import SqlEngine, Universe, risk_styles, industry_styles
factor_name = 'SIZE'
data_source = 'postgres+psycopg2://postgres:A12345678!@10.63.6.220/alpha'
......
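As a side note, a minimal self-contained sketch of the portfolio-return / IC computation that cs_impl performs above, on synthetic data; the OLS t-statistic step is an assumption about what follows sm.add_constant, the rest mirrors the diff:

```python
# Sketch of the cross-sectional analysis step in cs_impl above (synthetic data only).
import numpy as np
import statsmodels.api as sm

rng = np.random.RandomState(0)
activate_weight = rng.randn(500)                                # active weights vs. benchmark
excess_return = 0.1 * activate_weight + 0.02 * rng.randn(500)   # synthetic next-period excess returns

port_ret = np.log(activate_weight @ excess_return + 1.)         # log active portfolio return, as in cs_impl
ic = np.corrcoef(excess_return, activate_weight)[0, 1]          # information coefficient
x = sm.add_constant(activate_weight)
t_stat = sm.OLS(excess_return, x).fit().tvalues[1]              # assumed: significance of the cross-sectional fit
print(port_ret, ic, t_stat)
```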
......@@ -14,7 +14,7 @@ from alphamind.data.standardize import standardize
from alphamind.data.winsorize import winsorize_normal
from alphamind.portfolio.constraints import Constraints
from alphamind.portfolio.constraints import LinearConstraints
from alphamind.portfolio.longshortbulder import long_short_build
from alphamind.portfolio.longshortbulder import long_short_builder
from alphamind.portfolio.rankbuilder import rank_build
from alphamind.portfolio.linearbuilder import linear_builder
from alphamind.portfolio.meanvariancebuilder import mean_variance_builder
......@@ -109,7 +109,7 @@ def er_portfolio_analysis(er: np.ndarray,
weights = rank_build(er, use_rank=kwargs['use_rank'], masks=is_tradable).flatten() * benchmark.sum() / kwargs[
'use_rank']
elif method == 'ls' or method == 'long_short':
weights = long_short_build(er).flatten()
weights = long_short_builder(er).flatten()
elif method == 'mv' or method == 'mean_variance':
lbound, ubound, cons_exp, risk_lbound, risk_ubound = create_constraints(benchmark, **kwargs)
cov = kwargs['cov']
......
# -*- coding: utf-8 -*-
"""
Created on 2018-1-15
@author: cheng.li
"""
import numpy as np
from alphamind.data.standardize import standardize
def factor_turn_over(factor_values: np.ndarray,
trade_dates: np.ndarray,
codes: np.ndarray,
use_standize: bool=True):
if use_standize:
factor_values = standardize(factor_values, trade_dates)
if __name__ == '__main__':
from alphamind.api import *
engine = SqlEngine()
factor = 'ep_q'
freq = '5b'
start_date = '2017-06-01'
end_date = '2017-08-01'
universe = Universe('custom', ['zz500'])
......@@ -7,13 +7,14 @@ Created on 2017-7-20
"""
cimport numpy as cnp
from libcpp.string cimport string
from libcpp.vector cimport vector
import numpy as np
cdef extern from "lpoptimizer.hpp" namespace "pfopt":
cdef cppclass LpOptimizer:
LpOptimizer(int, int, double*, double*, double*, double*) except +
LpOptimizer(int, int, double*, double*, double*, double*, string) except +
vector[double] xValue()
double feval()
int status()
......@@ -29,9 +30,12 @@ cdef class LPOptimizer:
cnp.ndarray[double, ndim=2] cons_matrix,
double[:] lbound,
double[:] ubound,
double[:] objective):
double[:] objective,
str method='simplex'):
self.n = lbound.shape[0]
self.m = cons_matrix.shape[0]
py_bytes = method.encode('ascii')
cdef string c_str = py_bytes
cdef double[:] cons = cons_matrix.flatten(order='C');
self.cobj = new LpOptimizer(self.n,
......@@ -39,7 +43,8 @@ cdef class LPOptimizer:
&cons[0],
&lbound[0],
&ubound[0],
&objective[0])
&objective[0],
c_str)
def __dealloc__(self):
del self.cobj
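For reference, a hedged Python-side sketch of the extended constructor; the import path (alphamind.cython.optimizers), the accessor names and the cons_matrix row layout of [coefficients..., lower, upper] are assumptions inferred from this commit (see linear_builder further down), not verified against a build:

```python
# Sketch only: exercising the new `method` argument of LPOptimizer.
# Assumed import path and cons_matrix layout: each row is [a_1, ..., a_n, lower, upper].
import numpy as np
from alphamind.cython.optimizers import LPOptimizer

n = 4
objective = -np.array([4., 3., 2., 1.])      # LpOptimizer minimizes c @ x, so negate to maximize
lbound = np.zeros(n)
ubound = 0.5 * np.ones(n)
cons_matrix = np.concatenate([np.ones((1, n)), [[1.]], [[1.]]], axis=1)  # sum(x) == 1

opt = LPOptimizer(cons_matrix, lbound, ubound, objective, 'interior')    # 'simplex' remains the default
print(opt.status(), opt.feval(), opt.x_value())
```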
......@@ -151,9 +156,25 @@ cdef extern from "mvoptimizer.hpp" namespace "pfopt":
int status()
cdef extern from "qpalglib.hpp" namespace "pfopt":
cdef cppclass QPAlglib:
QPAlglib(int,
double*,
double*,
double*,
double*,
double) except +
vector[double] xValue()
int status()
cdef class QPOptimizer:
cdef MVOptimizer* cobj
cdef QPAlglib* cobj2
cdef cnp.ndarray er
cdef cnp.ndarray cov
cdef double risk_aversion
cdef int n
cdef int m
......@@ -169,12 +190,15 @@ cdef class QPOptimizer:
self.n = lbound.shape[0]
self.m = 0
self.er = np.array(expected_return)
self.cov = np.array(cov_matrix)
self.risk_aversion = risk_aversion
cdef double[:] cov = cov_matrix.flatten(order='C')
cdef double[:] cons
if cons_matrix is not None:
self.m = cons_matrix.shape[0]
cons = cons_matrix.flatten(order='C');
cons = cons_matrix.flatten(order='C')
self.cobj = new MVOptimizer(self.n,
&expected_return[0],
......@@ -187,25 +211,39 @@ cdef class QPOptimizer:
&cubound[0],
risk_aversion)
else:
self.cobj = new MVOptimizer(self.n,
self.cobj2 = new QPAlglib(self.n,
&expected_return[0],
&cov[0],
&lbound[0],
&ubound[0],
0,
NULL,
NULL,
NULL,
risk_aversion)
def __dealloc__(self):
if self.cobj:
del self.cobj
else:
del self.cobj2
def feval(self):
if self.cobj:
return self.cobj.feval()
else:
x = np.array(self.cobj2.xValue())
return 0.5 * self.risk_aversion * x @ self.cov @ x - self.er @ x
def x_value(self):
if self.cobj:
return np.array(self.cobj.xValue())
else:
return np.array(self.cobj2.xValue())
def status(self):
if self.cobj:
return self.cobj.status()
else:
status = self.cobj2.status()
if 1 <= status <= 4:
return 0
else:
return status
This diff is collapsed.
......@@ -14,8 +14,8 @@ from sqlalchemy import select
from sqlalchemy import join
from sqlalchemy import outerjoin
from alphamind.data.dbmodel.models import Universe as UniverseTable
from alphamind.data.dbmodel.models import FullFactor
from alphamind.data.engines.utilities import _map_factors
from alphamind.data.dbmodel.models import Market
from alphamind.data.engines.utilities import factor_tables
from alphamind.data.transformer import Transformer
from alphamind.utilities import encode
......@@ -86,22 +86,22 @@ class Universe(object):
dependency = transformer.dependency
factor_cols = _map_factors(dependency, factor_tables)
big_table = FullFactor
big_table = Market
for t in set(factor_cols.values()):
if t.__table__.name != FullFactor.__table__.name:
big_table = outerjoin(big_table, t, and_(FullFactor.trade_date == t.trade_date,
FullFactor.code == t.code,
FullFactor.trade_date.in_(
dates) if dates else FullFactor.trade_date.between(
if t.__table__.name != Market.__table__.name:
big_table = outerjoin(big_table, t, and_(Market.trade_date == t.trade_date,
Market.code == t.code,
Market.trade_date.in_(
dates) if dates else Market.trade_date.between(
start_date, end_date)))
big_table = join(big_table, UniverseTable,
and_(FullFactor.trade_date == UniverseTable.trade_date,
FullFactor.code == UniverseTable.code,
and_(Market.trade_date == UniverseTable.trade_date,
Market.code == UniverseTable.code,
universe_cond))
query = select(
[FullFactor.trade_date, FullFactor.code] + list(factor_cols.keys())) \
[Market.trade_date, Market.code] + list(factor_cols.keys())) \
.select_from(big_table).distinct()
df = pd.read_sql(query, engine.engine).sort_values(['trade_date', 'code']).dropna()
......
......@@ -18,10 +18,11 @@ from alphamind.data.dbmodel.models import Gogoal
from alphamind.data.dbmodel.models import Experimental
from alphamind.data.dbmodel.models import LegacyFactor
from alphamind.data.dbmodel.models import Tiny
from alphamind.data.dbmodel.models import RiskExposure
from alphamind.data.engines.industries import INDUSTRY_MAPPING
factor_tables = [Uqer, Gogoal, Experimental, LegacyFactor, Tiny]
factor_tables = [RiskExposure, Uqer, Gogoal, Experimental, LegacyFactor, Tiny]
def _map_risk_model_table(risk_model: str) -> tuple:
......
......@@ -9,6 +9,7 @@ from typing import Optional
from typing import List
import numpy as np
from alphamind.data.neutralize import neutralize
from alphamind.utilities import alpha_logger
def factor_processing(raw_factors: np.ndarray,
......@@ -29,6 +30,8 @@ def factor_processing(raw_factors: np.ndarray,
if post_process:
for p in post_process:
if p.__name__ == 'winsorize_normal':
alpha_logger.warning("winsorize_normal normally should not be done after neutralize")
new_factors = p(new_factors, groups=groups)
return new_factors
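To illustrate the behaviour the new warning targets, a minimal sketch of factor_processing with pre-processing, neutralization against risk exposures and post-processing (synthetic data; the positional call follows the usages earlier in this commit, and the factor_processing import path is an assumption):

```python
# Sketch: pre-process -> neutralize against risk factors -> post-process.
# Placing winsorize_normal in post_process would now trigger the warning added above.
import numpy as np
from alphamind.data.processing import factor_processing  # assumed module path
from alphamind.data.standardize import standardize
from alphamind.data.winsorize import winsorize_normal

rng = np.random.RandomState(42)
raw_factors = rng.randn(1000, 1)       # one factor across 1000 stocks
risk_exposures = rng.randn(1000, 10)   # style / industry exposures to neutralize against

clean = factor_processing(raw_factors,
                          [winsorize_normal, standardize],   # pre-process
                          risk_exposures,                    # neutralization exposures
                          [standardize])                     # post-process: standardize only
```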
Subproject commit d04d8ac4ca33c4482ac2baae9b537a0df0d1b036
Subproject commit 1dcecd88728512ff50730f418d9d58195bf33851
......@@ -17,7 +17,8 @@ def linear_builder(er: np.ndarray,
risk_constraints: np.ndarray,
risk_target: Tuple[np.ndarray, np.ndarray],
turn_over_target: float = None,
current_position: np.ndarray = None) -> Tuple[str, np.ndarray, np.ndarray]:
current_position: np.ndarray = None,
method: str='simplex') -> Tuple[str, np.ndarray, np.ndarray]:
er = er.flatten()
n, m = risk_constraints.shape
......@@ -36,7 +37,7 @@ def linear_builder(er: np.ndarray,
if not turn_over_target:
cons_matrix = np.concatenate((risk_constraints.T, risk_lbound, risk_ubound), axis=1)
opt = LPOptimizer(cons_matrix, lbound, ubound, -er)
opt = LPOptimizer(cons_matrix, lbound, ubound, -er, method)
status = opt.status()
......@@ -77,7 +78,7 @@ def linear_builder(er: np.ndarray,
risk_ubound = np.concatenate((risk_ubound, np.inf * np.ones((n, 1))), axis=0)
cons_matrix = np.concatenate((risk_constraints, risk_lbound, risk_ubound), axis=1)
opt = LPOptimizer(cons_matrix, lbound, ubound, -er)
opt = LPOptimizer(cons_matrix, lbound, ubound, -er, method)
status = opt.status()
......
......@@ -11,15 +11,15 @@ from alphamind.utilities import simple_abssum
from alphamind.utilities import transform
def long_short_build(er: np.ndarray,
leverage: float=1.,
groups: np.ndarray=None,
masks: np.ndarray=None) -> np.ndarray:
def long_short_builder(er: np.ndarray,
leverage: float = 1.,
groups: np.ndarray = None,
masks: np.ndarray = None) -> np.ndarray:
er = er.copy()
if masks is not None:
er[~masks] = 0.
er[masks] = 0.
er[~masks] = er[~masks] - er[~masks].mean()
if er.ndim == 1:
er = er.reshape((-1, 1))
......
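The revised masking logic is easy to misread in diff form: entries flagged by masks are now zeroed out and the remaining signals are demeaned before scaling, so the weights net to zero across the unmasked names. A small sketch (import path as in the test file further down):

```python
# Sketch of the new masking semantics of long_short_builder.
import numpy as np
from alphamind.portfolio.longshortbulder import long_short_builder

er = np.array([0.20, -0.10, 0.30, 0.40])
masks = np.array([False, False, False, True])    # True marks assets to exclude

w = long_short_builder(er, leverage=1., masks=masks).flatten()

assert abs(w.sum()) < 1e-12                # unmasked signals are demeaned, so weights net to zero
assert w[-1] == 0.                         # the masked asset receives no weight
assert abs(np.abs(w).sum() - 1.) < 1e-12   # gross exposure matches the requested leverage
```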
......@@ -37,6 +37,25 @@ class TestLinearBuild(unittest.TestCase):
expected_risk = np.zeros(self.risk_exp.shape[1])
np.testing.assert_array_almost_equal(calc_risk, expected_risk)
def test_linear_build_with_interior(self):
bm = self.bm / self.bm.sum()
eplson = 1e-6
status, _, w = linear_builder(self.er,
0.,
0.01,
self.risk_exp,
(bm @ self.risk_exp, bm @ self.risk_exp),
method='interior')
self.assertEqual(status, 'optimal')
self.assertAlmostEqual(np.sum(w), 1.)
self.assertTrue(np.all(w <= 0.01 + eplson))
self.assertTrue(np.all(w >= -eplson))
calc_risk = (w - bm) @ self.risk_exp
expected_risk = np.zeros(self.risk_exp.shape[1])
np.testing.assert_array_almost_equal(calc_risk, expected_risk)
def test_linear_build_with_inequality_constraints(self):
bm = self.bm / self.bm.sum()
eplson = 1e-6
......
......@@ -8,7 +8,7 @@ Created on 2017-5-9
import unittest
import numpy as np
import pandas as pd
from alphamind.portfolio.longshortbulder import long_short_build
from alphamind.portfolio.longshortbulder import long_short_builder
class TestLongShortBuild(unittest.TestCase):
......@@ -17,37 +17,38 @@ class TestLongShortBuild(unittest.TestCase):
self.x = np.random.randn(3000, 10)
self.groups = np.random.randint(10, 40, size=3000)
choices = np.random.choice(3000, 100, replace=False)
self.masks = np.full(3000, True, dtype=bool)
self.masks[choices] = False
self.masks = np.full(3000, False, dtype=bool)
self.masks[choices] = True
def test_long_short_build(self):
x = self.x[:, 0].flatten()
calc_weights = long_short_build(x).flatten()
calc_weights = long_short_builder(x).flatten()
expected_weights = x / np.abs(x).sum()
np.testing.assert_array_almost_equal(calc_weights, expected_weights)
calc_weights = long_short_build(self.x, leverage=2)
calc_weights = long_short_builder(self.x, leverage=2)
expected_weights = self.x / np.abs(self.x).sum(axis=0) * 2
np.testing.assert_array_almost_equal(calc_weights, expected_weights)
def test_long_short_build_with_group(self):
x = self.x[:, 0].flatten()
calc_weights = long_short_build(x, groups=self.groups).flatten()
calc_weights = long_short_builder(x, groups=self.groups).flatten()
expected_weights = pd.Series(x).groupby(self.groups).apply(lambda s: s / np.abs(s).sum())
np.testing.assert_array_almost_equal(calc_weights, expected_weights)
calc_weights = long_short_build(self.x, groups=self.groups)
calc_weights = long_short_builder(self.x, groups=self.groups)
expected_weights = pd.DataFrame(self.x).groupby(self.groups).apply(lambda s: s / np.abs(s).sum(axis=0))
np.testing.assert_array_almost_equal(calc_weights, expected_weights)
def test_long_short_build_with_masks(self):
x = self.x[:, 0].flatten()
calc_weights = long_short_builder(x, masks=self.masks, leverage=1.).flatten()
self.assertAlmostEqual(calc_weights.sum(), 0.)
masked_x = x.copy()
masked_x[~self.masks] = 0.
leverage = np.abs(masked_x).sum()
calc_weights = long_short_build(x, masks=self.masks, leverage=leverage).flatten()
expected_weights = x.copy()
expected_weights[~self.masks] = 0.
masked_x[self.masks] = 0.
masked_x[~self.masks] = masked_x[~self.masks] - masked_x[~self.masks].mean()
expected_weights = masked_x / np.abs(masked_x).sum()
np.testing.assert_array_almost_equal(calc_weights, expected_weights)
......
......@@ -21,7 +21,10 @@ fi
cd ../..
cd alphamind/pfopt
export BUILD_TEST=OFF
./build_linux.sh
if [ $? -ne 0 ] ; then
cd ../..
exit 1
......
......@@ -19,6 +19,7 @@ cd ../..
cd alphamind\pfopt
set BUILD_TEST=OFF
call build_windows.bat
if %errorlevel% neq 0 exit /b 1
......
......@@ -44,19 +44,12 @@
"batch = 0\n",
"horizon = map_freq(freq)\n",
"universe = Universe(\"custom\", ['zz800'])\n",
"data_source = 'postgres+psycopg2://postgres:A12345678!@10.63.6.220/alpha'\n",
"data_source = 'postgres+psycopg2://postgres:we083826@localhost/alpha'\n",
"benchmark_code = 905\n",
"method = 'tv'\n",
"target_vol = 0.05\n",
"risk_model = 'short'\n",
"\n",
"if risk_model == 'day':\n",
" risk_model_name = 'd_srisk'\n",
"elif risk_model == 'short':\n",
" risk_model_name = 's_srisk'\n",
"else:\n",
" risk_model_name = 'l_srisk'\n",
"\n",
"executor = NaiveExecutor()\n",
"ref_dates = makeSchedule(start_date, end_date, freq, 'china.sse')\n",
"engine = SqlEngine(data_source)"
......@@ -182,7 +175,7 @@
" \n",
" risk_exposure = res[all_styles].values\n",
" risk_cov = risk_cov[all_styles].values\n",
" special_risk = res[risk_model_name].values\n",
" special_risk = res.srisk.values\n",
" sec_cov = risk_exposure @ risk_cov @ risk_exposure.T / 10000 + np.diag(special_risk ** 2) / 10000\n",
"\n",
" benchmark_w = res.weight.values\n",
......@@ -276,9 +269,9 @@
"outputs": [],
"source": [
"def create_report(ret_df, windows):\n",
" sharp_calc = MovingSharp(windows)\n",
" drawdown_calc = MovingMaxDrawdown(windows)\n",
" max_drawdown_calc = MovingMaxDrawdown(len(ret_df))\n",
" sharp_calc = MovingSharp(windows, x='ret', y='riskFree')\n",
" drawdown_calc = MovingMaxDrawdown(windows, x='ret')\n",
" max_drawdown_calc = MovingMaxDrawdown(len(ret_df), x='ret')\n",
"\n",
" res_df = pd.DataFrame(columns=['daily_return', 'cum_ret', 'sharp', 'drawdown', 'max_drawn', 'leverage'])\n",
" total_returns = 0.\n",
......@@ -292,8 +285,8 @@
"\n",
" res_df.loc[date, 'daily_return'] = ret\n",
" res_df.loc[date, 'cum_ret'] = total_returns\n",
" res_df.loc[date, 'drawdown'] = drawdown_calc.result()[0]\n",
" res_df.loc[date, 'max_drawn'] = max_drawdown_calc.result()[0]\n",
" res_df.loc[date, 'drawdown'] = drawdown_calc.result()\n",
" res_df.loc[date, 'max_drawn'] = max_drawdown_calc.result()\n",
" res_df.loc[date, 'leverage'] = ret_df.loc[date, 'leverage']\n",
"\n",
" if i < 5:\n",
......@@ -335,13 +328,6 @@
" alpha_logger.info(f\"target_vol: {target_vol:.4f} finished\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
......@@ -356,18 +342,6 @@
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
},
"varInspector": {
"cols": {
"lenName": 16,
......
......@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
......@@ -15,7 +15,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
......@@ -24,7 +24,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
......@@ -64,32 +64,9 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Scale(n) time(ms) feval min(x) max(x) sum(x) x(0) + x(1)\n",
"200 39.72 -0.82 0.000000 0.010000 1.000000 0.015\n",
"400 25.20 -1.28 0.000000 0.010000 1.000000 0.015\n",
"600 29.23 -1.54 0.000000 0.010000 1.000000 0.015\n",
"800 32.27 -1.63 0.000000 0.010000 1.000000 0.015\n",
"1000 15.13 -1.72 0.000000 0.010000 1.000000 0.015\n",
"1200 16.79 -1.81 0.000000 0.010000 1.000000 0.015\n",
"1400 18.62 -1.90 0.000000 0.010000 1.000000 0.015\n",
"1600 20.90 -1.96 0.000000 0.010000 1.000000 0.015\n",
"1800 24.02 -2.03 0.000000 0.010000 1.000000 0.015\n",
"2000 27.05 -2.06 0.000000 0.010000 1.000000 0.015\n",
"2200 28.04 -2.07 0.000000 0.010000 1.000000 0.015\n",
"2400 30.25 -2.13 0.000000 0.010000 1.000000 0.015\n",
"2600 31.96 -2.14 0.000000 0.010000 1.000000 0.015\n",
"2800 34.44 -2.16 0.000000 0.010000 1.000000 0.015\n",
"3000 36.86 -2.19 0.000000 0.010000 1.000000 0.015\n"
]
}
],
"outputs": [],
"source": [
"print(\"{0:<8}{1:>12}{2:>12}{3:>12}{4:>12}{5:>12}{6:>15}\".format('Scale(n)', 'time(ms)', 'feval', 'min(x)', 'max(x)', 'sum(x)', 'x(0) + x(1)'))\n",
"\n",
......@@ -101,7 +78,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
......@@ -124,32 +101,9 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Scale(n) time(ms) feval min(x) max(x) sum(x) x(0) + x(1)\n",
"200 2.95 -0.82 0.000000 0.010000 1.000000 0.015\n",
"400 2.34 -1.28 0.000000 0.010000 1.000000 0.015\n",
"600 2.44 -1.54 0.000000 0.010000 1.000000 0.015\n",
"800 2.91 -1.63 0.000000 0.010000 1.000000 0.015\n",
"1000 7.58 -1.72 0.000000 0.010000 1.000000 0.015\n",
"1200 3.89 -1.81 0.000000 0.010000 1.000000 0.015\n",
"1400 4.22 -1.90 0.000000 0.010000 1.000000 0.015\n",
"1600 4.37 -1.96 0.000000 0.010000 1.000000 0.015\n",
"1800 4.81 -2.03 0.000000 0.010000 1.000000 0.015\n",
"2000 4.98 -2.06 0.000000 0.010000 1.000000 0.015\n",
"2200 5.31 -2.07 0.000000 0.010000 1.000000 0.015\n",
"2400 6.13 -2.13 0.000000 0.010000 1.000000 0.015\n",
"2600 6.12 -2.14 0.000000 0.010000 1.000000 0.015\n",
"2800 6.73 -2.16 0.000000 0.010000 1.000000 0.015\n",
"3000 7.39 -2.19 0.000000 0.010000 1.000000 0.015\n"
]
}
],
"outputs": [],
"source": [
"print(\"{0:<8}{1:>12}{2:>12}{3:>12}{4:>12}{5:>12}{6:>15}\".format('Scale(n)', 'time(ms)', 'feval', 'min(x)', 'max(x)', 'sum(x)', 'x(0) + x(1)'))\n",
"\n",
......@@ -162,9 +116,7 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"metadata": {},
"outputs": [],
"source": []
}
......@@ -186,6 +138,35 @@
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
},
"varInspector": {
"cols": {
"lenName": 16,
"lenType": 16,
"lenVar": 40
},
"kernels_config": {
"python": {
"delete_cmd_postfix": "",
"delete_cmd_prefix": "del ",
"library": "var_list.py",
"varRefreshCmd": "print(var_dic_list())"
},
"r": {
"delete_cmd_postfix": ") ",
"delete_cmd_prefix": "rm(",
"library": "var_list.r",
"varRefreshCmd": "cat(var_dic_list()) "
}
},
"types_to_exclude": [
"module",
"function",
"builtin_function_or_method",
"instance",
"_Feature"
],
"window_display": false
}
},
"nbformat": 4,
......
......@@ -51,7 +51,16 @@ else:
"alphamind/pfopt/include/pfopt",
"alphamind/pfopt/include/eigen",
"alphamind/pfopt/include/alglib"],
libraries=['pfopt', 'alglib', 'libClp', 'libCoinUtils', 'libipopt', 'libcoinhsl', 'libcoinblas', 'libcoinlapack', 'libcoinmetis'],
libraries=['pfopt',
'alglib',
'libClp',
'libCoinUtils',
'libipopt',
'libcoinhsl',
'libcoinblas',
'libcoinlapack',
'libcoinmetis',
'libcoinmumps'],
library_dirs=['alphamind/pfopt/lib'],
extra_compile_args=['/MD']),
]
......
Subproject commit bf4367184164e593cd2856ef38f8dd4f8cc76999
Subproject commit a187ed6c8f3aa40b47d5be80667cbbe6a6fd563d