Commit d533b7a2 authored by Dr.李

added more tests

parent e9a1e696
@@ -16,5 +16,3 @@ def perf_attribution_by_pos(net_weight_series: pd.Series,
                                           next_bar_return_series,
                                           benchmark_table)
     return explained_table.groupby(level=0).sum()
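
For orientation, a minimal usage sketch of perf_attribution_by_pos as the new test below exercises it; the sizes, factor count, and variable names are illustrative assumptions, not part of the commit.

import numpy as np
import pandas as pd
from alphamind.analysis.perfanalysis import perf_attribution_by_pos

# Hypothetical toy inputs sharing one bar-date index (an assumption for illustration).
dates = np.sort(np.random.randint(5, size=100))
active_weights = pd.Series(np.random.randn(100), index=dates)        # portfolio minus benchmark weights
next_bar_returns = pd.Series(np.random.randn(100), index=dates)      # forward returns per position
risk_exposures = pd.DataFrame(np.random.randn(100, 3), index=dates)  # per-position factor exposures

# Returns the explained table aggregated over positions via groupby(level=0),
# i.e. one row per date, with columns following the risk table.
explained = perf_attribution_by_pos(active_weights, next_bar_returns, risk_exposures)
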
@@ -7,12 +7,10 @@ Created on 2017-4-25
 import numpy as np
 import numba as nb
-from numpy import zeros
-from numpy.linalg import solve
 from typing import Tuple
 from typing import Union
 from typing import Dict
-from alphamind.utilities import groupby
+import alphamind.utilities as utils

 def neutralize(x: np.ndarray, y: np.ndarray, groups: np.ndarray=None, output_explained=False, output_exposure=False) \
@@ -22,20 +20,20 @@ def neutralize(x: np.ndarray, y: np.ndarray, groups: np.ndarray=None, output_exp
         y = y.reshape((-1, 1))

     if groups is not None:
-        res = zeros(y.shape)
+        res = np.zeros(y.shape)

         if y.ndim == 2:
             if output_explained:
-                explained = zeros(x.shape + (y.shape[1],))
+                explained = np.zeros(x.shape + (y.shape[1],))
             if output_exposure:
-                exposure = zeros(x.shape + (y.shape[1],))
+                exposure = np.zeros(x.shape + (y.shape[1],))
         else:
             if output_explained:
-                explained = zeros(x.shape + (1,))
+                explained = np.zeros(x.shape + (1,))
             if output_exposure:
-                exposure = zeros(x.shape + (1,))
+                exposure = np.zeros(x.shape + (1,))

-        index_diff, order = groupby(groups)
+        index_diff, order = utils.groupby(groups)
         start = 0
         for diff_loc in index_diff:
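
For readers unfamiliar with the helper: the loop above walks the rows group by group, as contiguous blocks of a sorted order. A minimal sketch of that pattern follows, with groupby_sketch standing in for alphamind.utilities.groupby; its exact return values are an assumption inferred from how index_diff and order are used in this hunk.

import numpy as np

def groupby_sketch(groups: np.ndarray):
    # Assumed semantics: return group-boundary positions plus a sort order
    # over the raw group labels.
    order = groups.argsort()
    sorted_groups = groups[order]
    # positions where the sorted label changes; append the last position so the
    # loop below visits every group, including the final one
    boundaries = np.where(np.diff(sorted_groups) != 0)[0]
    index_diff = np.concatenate([boundaries, [len(groups) - 1]])
    return index_diff, order

groups = np.array([2, 0, 1, 0, 2, 1])
index_diff, order = groupby_sketch(groups)
start = 0
for diff_loc in index_diff:
    curr_idx = order[start:diff_loc + 1]  # row indices of one group, contiguous after sorting
    # ... per-group work (e.g. a group-wise least-squares neutralization) goes here ...
    start = diff_loc + 1
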
@@ -90,7 +88,7 @@ def _sub_step(x, y, curr_idx, res):
 @nb.njit(nogil=True, cache=True)
 def ls_fit(x: np.ndarray, y: np.ndarray) -> np.ndarray:
     x_bar = x.T
-    b = solve(x_bar @ x, x_bar @ y)
+    b = np.linalg.solve(x_bar @ x, x_bar @ y)
     return b
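
ls_fit solves the ordinary-least-squares normal equations (X'X) b = X'y directly. A small numpy-only cross-check of the same computation; ls_fit_check is a hypothetical name used only for this illustration.

import numpy as np

def ls_fit_check(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    # Solve the normal equations (X'X) b = X'y for the OLS coefficients.
    x_bar = x.T
    return np.linalg.solve(x_bar @ x, x_bar @ y)

x = np.random.randn(1000, 3)
y = x @ np.array([[0.5], [-1.0], [2.0]]) + 0.01 * np.random.randn(1000, 1)

# Agrees with numpy's own least-squares solver up to numerical precision.
np.testing.assert_array_almost_equal(ls_fit_check(x, y),
                                     np.linalg.lstsq(x, y, rcond=None)[0])
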
@@ -107,11 +105,3 @@ def ls_explain(x: np.ndarray, b: np.ndarray) -> np.ndarray:
         explained[:, :, i] = b[:, i] * x
     return explained
-
-
-if __name__ == '__main__':
-    x = np.random.randn(3000, 3)
-    y = np.random.randn(3000, 2)
-    groups = np.random.randint(30, size=3000)
-    print(neutralize(x, y, groups, output_explained=True, output_exposure=True))
+# -*- coding: utf-8 -*-
+"""
+Created on 2017-5-12
+
+@author: cheng.li
+"""
+
+import unittest
+import numpy as np
+import pandas as pd
+from alphamind.analysis.perfanalysis import perf_attribution_by_pos
+
+
+class TestPerformanceAnalysis(unittest.TestCase):
+
+    @classmethod
+    def test_perf_attribution_by_pos(cls):
+        n_samples = 36000
+        n_dates = 20
+        n_risk_factors = 35
+
+        dates = np.sort(np.random.randint(n_dates, size=n_samples))
+
+        weights_series = pd.Series(data=np.random.randn(n_samples), index=dates)
+        bm_series = pd.Series(data=np.random.randn(n_samples), index=dates)
+        next_bar_return_series = pd.Series(data=np.random.randn(n_samples), index=dates)
+        risk_table = pd.DataFrame(data=np.random.randn(n_samples, n_risk_factors),
+                                  columns=list(range(n_risk_factors)),
+                                  index=dates)
+
+        explained_table = perf_attribution_by_pos(weights_series - bm_series,
+                                                  next_bar_return_series,
+                                                  risk_table)
+
+        to_explain = (weights_series - bm_series).multiply(next_bar_return_series, axis=0)
+        aggregated_to_explain = pd.Series(to_explain).groupby(dates).sum()
+        aggregated_explained = explained_table.sum(axis=1)
+
+        np.testing.assert_array_almost_equal(aggregated_to_explain.values,
+                                             aggregated_explained.values)
+
+
+if __name__ == '__main__':
+    unittest.main()
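
The assertion in the new test is an exhaustiveness check: for each date, the explained contributions summed across columns must equal the total active P&L being attributed. In miniature, the left-hand side of that comparison is just the date-aggregated product of active weight and next-bar return; the toy numbers below are invented for illustration and do not call perf_attribution_by_pos.

import numpy as np
import pandas as pd

dates = np.array([0, 0, 1, 1, 1])
active_weight = pd.Series([0.1, -0.2, 0.3, 0.0, -0.1], index=dates)
next_bar_return = pd.Series([0.01, 0.02, -0.01, 0.03, 0.00], index=dates)

# Per-position active P&L, then one total per date: this is the quantity the
# explained table's row sums are compared against in the test above.
to_explain = active_weight.multiply(next_bar_return, axis=0)
print(to_explain.groupby(dates).sum())
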
@@ -13,8 +13,7 @@ from alphamind.analysis.riskanalysis import risk_analysis
 class TestRiskAnalysis(unittest.TestCase):
-    @classmethod
-    def test_risk_analysis(cls):
+    def test_risk_analysis(self):
         n_samples = 36000
         n_dates = 20
         n_risk_factors = 35
...
@@ -20,6 +20,7 @@ from alphamind.tests.portfolio.test_percentbuild import TestPercentBuild
 from alphamind.tests.portfolio.test_linearbuild import TestLinearBuild
 from alphamind.tests.settlement.test_simplesettle import TestSimpleSettle
 from alphamind.tests.analysis.test_riskanalysis import TestRiskAnalysis
+from alphamind.tests.analysis.test_perfanalysis import TestPerformanceAnalysis

 if __name__ == '__main__':
@@ -31,6 +32,7 @@ if __name__ == '__main__':
                        TestPercentBuild,
                        TestLinearBuild,
                        TestSimpleSettle,
-                       TestRiskAnalysis],
+                       TestRiskAnalysis,
+                       TestPerformanceAnalysis],
                        alpha_logger)
     runner.run()