Commit d69af2fe authored by Dr.李's avatar Dr.李
parents 644bda95 6b352970
......@@ -3,7 +3,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"%matplotlib inline\n",
......@@ -23,7 +25,7 @@
"outputs": [],
"source": [
"# Back test parameter settings\n",
"start_date = '2010-01-01'\n",
"start_date = '2017-01-01'\n",
"end_date = '2018-02-24'\n",
"\n",
"freq = '10b'\n",
......@@ -39,9 +41,19 @@
"horizon = map_freq(freq)\n",
"weight_gap = 0.01\n",
"universe = Universe(\"custom\", ['zz800'])\n",
"data_source = 'postgres+psycopg2://postgres:A12345678!@10.63.6.220/alpha'\n",
"data_source = 'postgres+psycopg2://postgres:we083826@192.168.0.102/alpha'\n",
"benchmark_code = 905\n",
"offset = 0\n",
"offset = 1\n",
"method = 'risk_neutral'\n",
"target_vol = 0.05\n",
"risk_model = 'short'\n",
"\n",
"if risk_model == 'day':\n",
" risk_model_name = 'd_srisk'\n",
"elif risk_model == 'short':\n",
" risk_model_name = 's_srisk'\n",
"else:\n",
" risk_model_name = 'l_srisk'\n",
"\n",
"executor = NaiveExecutor()\n",
"ref_dates = makeSchedule(start_date, end_date, freq, 'china.sse')\n",
......@@ -51,7 +63,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"alpha_factors = {\n",
......@@ -118,7 +132,7 @@
"# training / predict on dask executor\n",
"\n",
"from dask.distributed import Client\n",
"client = Client('10.63.6.176:8786')\n",
"client = Client('192.168.0.102:8786')\n",
"\n",
"tasks = client.map(predict_worker, [(d.strftime('%Y-%m-%d'), alpha_model) for d in ref_dates], pure=False)\n",
"predicts = client.gather(tasks)"
......@@ -135,6 +149,7 @@
"industry_names = industry_list(industry_name, industry_level)\n",
"constraint_risk = ['SIZE', 'SIZENL', 'BETA'] + industry_names\n",
"total_risk_names = constraint_risk + ['benchmark', 'total']\n",
"all_styles = risk_styles + industry_styles + macro_styles\n",
"\n",
"b_type = []\n",
"l_val = []\n",
......@@ -163,19 +178,25 @@
"\n",
"industry_total = engine.fetch_industry_matrix_range(universe, dates=ref_dates, category=industry_name, level=industry_level)\n",
"benchmark_total = engine.fetch_benchmark_range(dates=ref_dates, benchmark=benchmark_code)\n",
"risk_total = engine.fetch_risk_model_range(universe, dates=ref_dates)[1]\n",
"risk_cov_total, risk_exposure_total = engine.fetch_risk_model_range(universe, dates=ref_dates, risk_model=risk_model)\n",
"\n",
"for i, ref_date in enumerate(ref_dates):\n",
" ref_date = ref_date.strftime('%Y-%m-%d')\n",
" industry_matrix = industry_total[industry_total.trade_date == ref_date]\n",
" benchmark_w = benchmark_total[benchmark_total.trade_date == ref_date]\n",
" risk_matrix = risk_total[risk_total.trade_date == ref_date]\n",
" risk_exposure = risk_exposure_total[risk_exposure_total.trade_date == ref_date]\n",
" risk_cov = risk_cov_total[risk_cov_total.trade_date == ref_date]\n",
" \n",
" res = pd.merge(industry_matrix, benchmark_w, on=['code'], how='left').fillna(0.)\n",
" res = pd.merge(res, risk_matrix, on=['code'])\n",
" res = pd.merge(res, risk_exposure, on=['code'])\n",
" res = res.dropna()\n",
" codes = res.code.values.tolist()\n",
" \n",
" risk_exposure = res[all_styles].values\n",
" risk_cov = risk_cov[all_styles].values\n",
" special_risk = res[risk_model_name].values\n",
" sec_cov = risk_exposure @ risk_cov @ risk_exposure.T / 10000 + np.diag(special_risk ** 2) / 10000\n",
" \n",
" benchmark_w = res.weight.values\n",
" is_in_benchmark = (benchmark_w > 0.).astype(float).reshape((-1, 1))\n",
" \n",
......@@ -210,11 +231,13 @@
" constraints,\n",
" False,\n",
" benchmark_w,\n",
" method='risk_neutral',\n",
" method=method,\n",
" turn_over_target=turn_over_target,\n",
" current_position=current_position,\n",
" lbound=lbound,\n",
" ubound=ubound)\n",
" ubound=ubound,\n",
" target_vol=target_vol,\n",
" cov=sec_cov)\n",
" except ValueError:\n",
" alpha_logger.info('{0} full re-balance: {1}'.format(ref_date, len(er)))\n",
" target_pos, _ = er_portfolio_analysis(er,\n",
......@@ -223,9 +246,11 @@
" constraints,\n",
" False,\n",
" benchmark_w,\n",
" method='risk_neutral',\n",
" method=method,\n",
" lbound=lbound,\n",
" ubound=ubound)\n",
" ubound=ubound,\n",
" target_vol=target_vol,\n",
"                                                  cov=sec_cov)\n",
" \n",
" target_pos['code'] = codes\n",
" turn_over, executed_pos = executor.execute(target_pos=target_pos)\n",
......@@ -274,7 +299,71 @@
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"tornado.application - ERROR - Exception in callback functools.partial(<function wrap.<locals>.null_wrapper at 0x000001C2002EDD90>, <tornado.concurrent.Future object at 0x000001C2054252E8>)\n",
"Traceback (most recent call last):\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\distributed\\comm\\core.py\", line 185, in connect\n",
" quiet_exceptions=EnvironmentError)\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\gen.py\", line 1055, in run\n",
" value = future.result()\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\concurrent.py\", line 238, in result\n",
" raise_exc_info(self._exc_info)\n",
" File \"<string>\", line 4, in raise_exc_info\n",
"tornado.gen.TimeoutError: Timeout\n",
"\n",
"During handling of the above exception, another exception occurred:\n",
"\n",
"Traceback (most recent call last):\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\ioloop.py\", line 605, in _run_callback\n",
" ret = callback()\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\stack_context.py\", line 277, in null_wrapper\n",
" return fn(*args, **kwargs)\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\ioloop.py\", line 626, in _discard_future_result\n",
" future.result()\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\concurrent.py\", line 238, in result\n",
" raise_exc_info(self._exc_info)\n",
" File \"<string>\", line 4, in raise_exc_info\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\gen.py\", line 1063, in run\n",
" yielded = self.gen.throw(*exc_info)\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\distributed\\client.py\", line 804, in _update_scheduler_info\n",
" self._scheduler_identity = yield self.scheduler.identity()\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\gen.py\", line 1055, in run\n",
" value = future.result()\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\concurrent.py\", line 238, in result\n",
" raise_exc_info(self._exc_info)\n",
" File \"<string>\", line 4, in raise_exc_info\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\gen.py\", line 1063, in run\n",
" yielded = self.gen.throw(*exc_info)\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\distributed\\core.py\", line 463, in send_recv_from_rpc\n",
" comm = yield self.live_comm()\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\gen.py\", line 1055, in run\n",
" value = future.result()\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\concurrent.py\", line 238, in result\n",
" raise_exc_info(self._exc_info)\n",
" File \"<string>\", line 4, in raise_exc_info\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\gen.py\", line 1063, in run\n",
" yielded = self.gen.throw(*exc_info)\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\distributed\\core.py\", line 439, in live_comm\n",
" connection_args=self.connection_args)\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\gen.py\", line 1055, in run\n",
" value = future.result()\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\concurrent.py\", line 238, in result\n",
" raise_exc_info(self._exc_info)\n",
" File \"<string>\", line 4, in raise_exc_info\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\tornado\\gen.py\", line 1063, in run\n",
" yielded = self.gen.throw(*exc_info)\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\distributed\\comm\\core.py\", line 194, in connect\n",
" _raise(error)\n",
" File \"D:\\ProgramData\\IntelPython3_2018\\lib\\site-packages\\distributed\\comm\\core.py\", line 177, in _raise\n",
" raise IOError(msg)\n",
"OSError: Timed out trying to connect to 'tcp://192.168.0.102:8786' after 5 s: connect() didn't finish in time\n"
]
}
],
"source": [
"ret_df[['returns', 'tc_cost']][-30:].cumsum().plot(figsize=(12, 6),\n",
" title='Fixed freq rebalanced: {0} with benchmark {1}'.format(freq, 905),\n",
......@@ -284,7 +373,18 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
......@@ -295,18 +395,6 @@
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
},
"varInspector": {
"cols": {
"lenName": 16,
......
......@@ -3,7 +3,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"%matplotlib inline\n",
......@@ -44,8 +46,18 @@
"batch = 0\n",
"horizon = map_freq(freq)\n",
"universe = Universe(\"custom\", ['zz800'])\n",
"data_source = 'postgres+psycopg2://postgres:A12345678!@10.63.6.220/alpha'\n",
"benchmark_code = 300\n",
"data_source = 'postgres+psycopg2://postgres:we083826@192.168.0.102/alpha'\n",
"benchmark_code = 905\n",
"method = 'tv'\n",
"target_vol = 0.05\n",
"risk_model = 'short'\n",
"\n",
"if risk_model == 'day':\n",
" risk_model_name = 'd_srisk'\n",
"elif risk_model == 'short':\n",
" risk_model_name = 's_srisk'\n",
"else:\n",
" risk_model_name = 'l_srisk'\n",
"\n",
"executor = NaiveExecutor()\n",
"ref_dates = makeSchedule(start_date, end_date, freq, 'china.sse')\n",
......@@ -62,19 +74,41 @@
"Factor Model\n",
"\"\"\"\n",
"\n",
"alpha_factors = {\n",
" 'f01': LAST('ep_q'),\n",
" 'f02': LAST('roe_q'),\n",
" 'f03': LAST('market_confidence_25d'),\n",
" 'f04': LAST('ILLIQUIDITY'),\n",
" 'f05': LAST('cfinc1_q'),\n",
" 'f06': LAST('CFO2EV'),\n",
" 'f07': LAST('IVR'),\n",
" 'f08': LAST('con_pe_rolling_order'),\n",
" 'f09': LAST('con_pb_rolling_order')\n",
" }\n",
"\n",
"\n",
"weights = dict(f01=1.,\n",
" f02=0.5,\n",
" f03=0.5,\n",
" f04=0.5,\n",
" f05=0.5,\n",
" f06=0.5,\n",
" f07=0.5,\n",
" f08=-0.5,\n",
" f09=-0.5)\n",
"\n",
"# alpha_factors = {\n",
"# 'f01': LAST('ep_q'),\n",
"# 'f02': LAST('roe_q'),\n",
"# 'f03': LAST('market_confidence_25d'),\n",
"# 'f04': LAST('ILLIQUIDITY'),\n",
"# 'f05': LAST('cfinc1_q'),\n",
"# 'f06': LAST('CFO2EV'),\n",
"# 'f07': LAST('IVR'),\n",
"# 'f03': LAST('market_confidence_75d'),\n",
"# 'f04': LAST('DivP'),\n",
"# 'f05': LAST('val_q'),\n",
"# 'f06': LAST('con_np_rolling'),\n",
"# 'f07': LAST('GREV'),\n",
"# 'f08': LAST('con_pe_rolling_order'),\n",
"# 'f09': LAST('con_pb_rolling_order')\n",
"# }\n",
"\n",
"\n",
"# weights = dict(f01=1.,\n",
"# f02=0.5,\n",
"# f03=0.5,\n",
......@@ -85,28 +119,6 @@
"# f08=-0.5,\n",
"# f09=-0.5)\n",
"\n",
"alpha_factors = {\n",
" 'f01': LAST('ep_q'),\n",
" 'f02': LAST('roe_q'),\n",
" 'f03': LAST('market_confidence_75d'),\n",
" 'f04': LAST('DivP'),\n",
" 'f05': LAST('val_q'),\n",
" 'f06': LAST('con_np_rolling'),\n",
" 'f07': LAST('GREV'),\n",
" 'f08': LAST('con_pe_rolling_order'),\n",
" 'f09': LAST('con_pb_rolling_order')\n",
"}\n",
"\n",
"weights = dict(f01=1.,\n",
" f02=0.5,\n",
" f03=0.5,\n",
" f04=0.5,\n",
" f05=0.5,\n",
" f06=0.5,\n",
" f07=0.5,\n",
" f08=-0.5,\n",
" f09=-0.5)\n",
"\n",
"alpha_model = ConstLinearModel(features=alpha_factors, weights=weights)\n",
"\n",
"def predict_worker(params):\n",
......@@ -152,7 +164,7 @@
"\"\"\"\n",
"\n",
"from dask.distributed import Client\n",
"client = Client('10.63.6.176:8786')\n",
"client = Client('192.168.0.102:8786')\n",
"\n",
"tasks = client.map(predict_worker, [(d.strftime('%Y-%m-%d'), alpha_model) for d in ref_dates], pure=False)\n",
"predicts = client.gather(tasks)\n",
......@@ -162,7 +174,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"\"\"\"\n",
......@@ -172,6 +186,7 @@
"industry_names = industry_list(industry_name, industry_level)\n",
"constraint_risk = ['SIZE', 'SIZENL', 'BETA'] + industry_names\n",
"total_risk_names = constraint_risk + ['benchmark', 'total']\n",
"all_styles = risk_styles + industry_styles + macro_styles\n",
"\n",
"b_type = []\n",
"l_val = []\n",
......@@ -194,19 +209,21 @@
"bounds = create_box_bounds(total_risk_names, b_type, l_val, u_val)\n",
"industry_total = engine.fetch_industry_matrix_range(universe, dates=ref_dates, category=industry_name, level=industry_level)\n",
"benchmark_total = engine.fetch_benchmark_range(dates=ref_dates, benchmark=benchmark_code)\n",
"risk_total = engine.fetch_risk_model_range(universe, dates=ref_dates)[1]\n",
"risk_cov_total, risk_exposure_total = engine.fetch_risk_model_range(universe, dates=ref_dates, risk_model=risk_model)\n",
"index_return = engine.fetch_dx_return_index_range(benchmark_code, start_date, end_date, horizon=horizon, offset=1).set_index('trade_date')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# rebalance\n",
"\n",
"def create_scenario(weight_gap):\n",
"def create_scenario(weight_gap, target_vol, method):\n",
"\n",
" previous_pos = pd.DataFrame()\n",
" rets = []\n",
......@@ -217,12 +234,18 @@
" ref_date = ref_date.strftime('%Y-%m-%d')\n",
" industry_matrix = industry_total[industry_total.trade_date == ref_date]\n",
" benchmark_w = benchmark_total[benchmark_total.trade_date == ref_date]\n",
" risk_matrix = risk_total[risk_total.trade_date == ref_date]\n",
" risk_exposure = risk_exposure_total[risk_exposure_total.trade_date == ref_date]\n",
" risk_cov = risk_cov_total[risk_cov_total.trade_date == ref_date]\n",
"\n",
" res = pd.merge(industry_matrix, benchmark_w, on=['code'], how='left').fillna(0.)\n",
" res = pd.merge(res, risk_matrix, on=['code'])\n",
" res = pd.merge(res, risk_exposure, on=['code'])\n",
" res = res.dropna()\n",
" codes = res.code.values.tolist()\n",
" \n",
" risk_exposure = res[all_styles].values\n",
" risk_cov = risk_cov[all_styles].values\n",
" special_risk = res[risk_model_name].values\n",
" sec_cov = risk_exposure @ risk_cov @ risk_exposure.T / 10000 + np.diag(special_risk ** 2) / 10000\n",
"\n",
" benchmark_w = res.weight.values\n",
" is_in_benchmark = (benchmark_w > 0.).astype(float).reshape((-1, 1))\n",
......@@ -257,11 +280,13 @@
" constraints,\n",
" False,\n",
" benchmark_w,\n",
" method='risk_neutral',\n",
" method=method,\n",
" turn_over_target=turn_over_target,\n",
" current_position=current_position,\n",
" lbound=lbound,\n",
" ubound=ubound)\n",
" ubound=ubound,\n",
" target_vol=target_vol,\n",
" cov=sec_cov)\n",
" except ValueError:\n",
" target_pos, _ = er_portfolio_analysis(er,\n",
" industry_matrix.industry_name.values,\n",
......@@ -269,9 +294,11 @@
" constraints,\n",
" False,\n",
" benchmark_w,\n",
" method='risk_neutral',\n",
" method=method,\n",
" lbound=lbound,\n",
" ubound=ubound)\n",
" ubound=ubound,\n",
" target_vol=target_vol,\n",
" cov=sec_cov)\n",
"\n",
" target_pos['code'] = codes\n",
" turn_over, executed_pos = executor.execute(target_pos=target_pos)\n",
......@@ -307,7 +334,9 @@
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def create_report(ret_df, windows):\n",
......@@ -346,9 +375,9 @@
"source": [
"weight_gaps = [0.005, 0.010, 0.015, 0.020]\n",
"\n",
"with pd.ExcelWriter(f'{universe.base_universe[0]}_{benchmark_code}_perf.xlsx', engine='xlsxwriter') as writer:\n",
"with pd.ExcelWriter(f'{universe.base_universe[0]}_{benchmark_code}_gap.xlsx', engine='xlsxwriter') as writer:\n",
" for i, weight_gap in enumerate(weight_gaps):\n",
" ret_df = create_scenario(weight_gap)\n",
" ret_df = create_scenario(weight_gap, target_vol=0.01, method='risk_neutral')\n",
" res_df = create_report(ret_df, 25)\n",
" res_df.to_excel(writer, sheet_name=f'{i}')\n",
" alpha_logger.info(f\"{weight_gap} finished\")"
......@@ -359,6 +388,24 @@
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"target_vols = [0.015, 0.030, 0.045, 0.060]\n",
"\n",
"with pd.ExcelWriter(f'{universe.base_universe[0]}_{benchmark_code}_tv.xlsx', engine='xlsxwriter') as writer:\n",
" for i, target_vol in enumerate(target_vols):\n",
" ret_df = create_scenario(weight_gap=0.02, target_vol=target_vol, method='tv')\n",
" res_df = create_report(ret_df, 25)\n",
" res_df.to_excel(writer, sheet_name=f'{i}')\n",
" alpha_logger.info(f\"{target_vol:.4f} finished\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
......@@ -378,7 +425,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
"version": "3.6.3"
},
"varInspector": {
"cols": {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment