gplearn factor mining: minute-level data works quite well, with 81% annualized return and a 10% maximum drawdown (quantlab 3.1 source code + data download)

Two things for today:

1. Supplement the gplearn function set to align it with the WorldQuant 101 operators, while also working toward reuse with our own factor expressions (a sketch of how such operators are registered with gplearn follows this list).

2. Release version 3.1.
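For context on item 1, this is roughly how a new operator gets registered with gplearn. The ts_mean_5 helper and its fixed 5-bar window below are illustrative assumptions rather than part of the quantlab code; gplearn only needs a vectorized function plus make_function with a matching arity:

import numpy as np
import pandas as pd
from gplearn.functions import make_function

def _ts_mean_5(x):
    # rolling 5-bar mean; the window is hard-coded because a gplearn operator has a fixed arity
    return pd.Series(x).rolling(5, min_periods=1).mean().to_numpy()

# wrap the raw function so it can be placed into the function_set of a SymbolicRegressor
ts_mean_5 = make_function(function=_ts_mean_5, name='ts_mean_5', arity=1)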

Using the script we have prepared, download the main continuous contract data for rebar. Later we will also prepare minute-level data and the like, but this makes no essential difference to our factor mining:

import akshare as ak
from datetime import datetime

from config import DATA_DIR

symbol = 'RB0'  # rebar main continuous contract
df = ak.futures_main_sina(symbol=symbol, start_date="19900101", end_date=datetime.now().strftime('%Y%m%d'))

# rename the Chinese columns returned by akshare to standard names
df.rename(columns={'日期': 'date', '开盘价': 'open', '最高价': 'high', '最低价': 'low', '收盘价': 'close', '成交量': 'volume',
                   '持仓量': 'open_interest', '动态结算价': 'vwap'}, inplace=True)
df['date'] = df['date'].apply(lambda x: str(x).replace('-', ''))  # normalize dates to YYYYMMDD

df.to_csv(DATA_DIR.joinpath('futures').joinpath(symbol + '.csv'), index=None)
print(df)
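Note that the gplearn demo further down reads its own pre-built IC_train.csv / IC_test.csv files; the rebar download above is a separate step, and the exact preprocessing behind those files is not shown in this post. As a rough, assumed sketch, the freshly saved futures CSV could be split into train/test sets along these lines (the 80/20 chronological split and the column selection are illustrative only):

import pandas as pd
from config import DATA_DIR

df = pd.read_csv(DATA_DIR.joinpath('futures').joinpath('RB0.csv'))
features = df.set_index('date')[['open', 'high', 'low', 'close', 'volume', 'open_interest', 'vwap']]

split = int(len(features) * 0.8)  # assumed 80/20 chronological split
features.iloc[:split].to_csv(DATA_DIR.joinpath('futures').joinpath('RB0_train.csv'))
features.iloc[split:].to_csv(DATA_DIR.joinpath('futures').joinpath('RB0_test.csv'))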


Importing the gpquant code gives us 23 basic functions suited to our factor mining (the four arithmetic operations, sine/cosine, and so on) plus a larger set of time-series functions (skewness, kurtosis, correlation, and common technical indicators):


function_map = {
    "square": square1,
    "sqrt": sqrt1,
    "cube": cube1,
    "cbrt": cbrt1,
    "sign": sign1,
    "neg": neg1,
    "inv": inv1,
    "abs": abs1,
    "sin": sin1,
    "cos": cos1,
    "tan": tan1,
    "log": log1,
    "sig": sig1,
    "add": add2,
    "sub": sub2,
    "mul": mul2,
    "div": div2,
    "max": max2,
    "min": min2,
    "mean": mean2,
    "clear by cond": clear_by_cond3,
    "if then else": if_then_else3,
    "if cond then else": if_cond_then_else4,
    "ts delay": ts_delay2,
    "ts delta": ts_delta2,
    "ts pct change": ts_pct_change2,
    "ts mean return": ts_mean_return2,
    "ts max": ts_max2,
    "ts min": ts_min2,
    "ts sum": ts_sum2,
    "ts product": ts_product2,
    "ts mean": ts_mean2,
    "ts std": ts_std2,
    "ts median": ts_median2,
    "ts midpoint": ts_midpoint2,
    "ts skew": ts_skew2,
    "ts kurt": ts_kurt2,
    "ts inverse cv": ts_inverse_cv2,
    "ts cov": ts_cov3,
    "ts corr": ts_corr3,
    "ts autocorr": ts_autocorr3,
    "ts maxmin": ts_maxmin2,
    "ts zscore": ts_zscore2,
    "ts regression beta": ts_regression_beta3,
    "ts linear slope": ts_linear_slope2,
    "ts linear intercept": ts_linear_intercept2,
    "ts argmax": ts_argmax2,
    "ts argmin": ts_argmin2,
    "ts argmaxmin": ts_argmaxmin2,
    "ts rank": ts_rank2,
    "ts ema": ts_ema2,
    "ts dema": ts_dema2,
    "ts kama": ts_kama4,
    "ts AROONOSC": ts_AROONOSC3,
    "ts WR": ts_WR4,
    "ts CCI": ts_CCI4,
    "ts ATR": ts_ATR4,
    "ts NATR": ts_NATR4,
    "ts ADX": ts_ADX4,
    "ts MFI": ts_MFI5,
}
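The trailing digit in each target name appears to indicate the operator's arity (add2 takes two inputs, ts_corr3 takes three, and so on). As an illustration of the protected style such operators usually follow, here is a hedged sketch of what div2 and ts_std2 might look like; the actual gpquant implementations may differ:

import numpy as np
import pandas as pd

def div2(x1, x2):
    # protected division: return 1.0 wherever the denominator is (near) zero
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.where(np.abs(x2) > 1e-10, np.divide(x1, x2), 1.0)

def ts_std2(x, window):
    # rolling standard deviation; assumes the window arrives as the operator's second argument
    w = max(int(np.ravel(window)[0]), 2)
    return pd.Series(x).rolling(w, min_periods=1).std().fillna(0).to_numpy()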

In addition, using gplearn for factor mining requires one more important extension: the fitness function (i.e. our optimization objective, which can be annualized return, Sharpe ratio, and so on; after downloading the code you can extend it yourself):

fitness_map = {
    "annual return": ann_return,
    "sharpe ratio": sharpe_ratio,
    "mean absolute error": mean_absolute_error,
    "mean square error": mean_square_error,
    "direction accuracy": direction_accuracy,
}
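Each of these maps to a function with gplearn's fitness signature, function(y, y_pred, sample_weight) -> float. Below is a minimal sketch of what a Sharpe-style fitness could look like; it is not the actual gpquant implementation, and it assumes y holds forward log returns and that we simply trade the sign of the mined factor:

import numpy as np

def sharpe_ratio_sketch(y, y_pred, sample_weight):
    # long when the mined factor is positive, short when it is negative
    pos = np.sign(y_pred)
    pnl = pos * y  # y is assumed to be the forward log return of each bar
    if np.std(pnl) == 0:
        return 0.0
    return float(np.mean(pnl) / np.std(pnl) * np.sqrt(252))  # annualization factor assumes daily bars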

The gplearn code:

import numpy as np
import pandas as pd
from gplearn import fitness
from gplearn.genetic import SymbolicRegressor

# Data set

train_data = pd.read_csv('../data/IC_train.csv', index_col=0, parse_dates=[0])
test_data = pd.read_csv('../data/IC_test.csv', index_col=0, parse_dates=[0])
feature_names = list(train_data.columns)
train_data.loc[:, 'y'] = np.log(train_data['Close'].shift(-4) / train_data['Close'])
train_data.dropna(inplace=True)

from examples.backtest import BackTester

class SymbolicTestor(BackTester):  # backtest configuration
    def init(self):
        self.params = {'factor': pd.Series}

    @BackTester.process_strategy
    def run_(self, *args, **kwargs) -> dict[str, int]:
        factor = np.array(self.params['factor'])
        long_cond = factor > 0
        short_cond = factor < 0
        self.backtest_env['signal'] = np.where(long_cond, 1, np.where(short_cond, -1, np.nan))
        self.construct_position_(keep_raw=True, max_holding_period=1200, take_profit=None, stop_loss=None)

# Backtest environment (used by the fitness function)
comm = [0 / 10000, 0 / 10000]  # buy/sell fee rates
bt = SymbolicTestor(train_data, transact_base='PreClose', commissions=(comm[0], comm[1]))  # load data; transact_base sets the execution price reference; comm holds the buy/sell fees
def score_func_basic(y, y_pred, sample_weight):  # factor evaluation metric
    try:
        _ = bt.run_(factor=y_pred)
        factor_ret = _['annualized_mean'] / _['max_drawdown'] if _['max_drawdown'] != 0 else 0  # max_drawdown can be swapped for annualized_std
    except Exception:
        factor_ret = 0
    return factor_ret

def my_gplearn(function_set, my_fitness, pop_num=100, gen_num=3, tour_num=10, random_state=42, feature_names=None):
    # some workable combinations of pop_num, gen_num, tour_num: 500, 5, 50; 1000, 3, 20; 1000, 15, 100
    metric = fitness.make_fitness(function=my_fitness,  # function(y, y_pred, sample_weight) that returns a floating point number
                                  # y is the input target vector, y_pred is the genetic program's prediction, sample_weight is the sample-weight vector
                                  greater_is_better=True,
                                  wrap=False)  # wrap=False skips pickling and runs faster; signature: gplearn.fitness.make_fitness(function, greater_is_better, wrap=True)
    return SymbolicRegressor(population_size=pop_num,  # number of formulas in each generation's population
                             generations=gen_num,  # number of generations to evolve
                             metric=metric,  # fitness metric: the function defined above (long if factor > 0, short if < 0, scored by annualized return / max drawdown)
                             tournament_size=tour_num,  # tournament size per generation; the fittest formulas are selected for mutation or reproduction
                             function_set=function_set,
                             const_range=(-1.0, 1.0),  # range of constants allowed in formulas
                             parsimony_coefficient='auto',
                             # penalty on larger trees; default 0.001. 'auto' uses c = Cov(l, f) / Var(l), where Cov(l, f) is the covariance between program size l and program fitness f in the population, and Var(l) is the variance of program sizes
                             stopping_criteria=100.0,  # stop early once the metric (here return / drawdown) reaches this value
                             init_depth=(2, 3),  # initial depth range of the formula trees
                             init_method='half and half',  # tree initialization: 'grow' yields asymmetric trees, 'full' yields bushy ones; 'half and half' mixes both
                             p_crossover=0.8,  # crossover probability
                             p_subtree_mutation=0.05,  # subtree mutation probability
                             p_hoist_mutation=0.05,  # hoist mutation probability
                             p_point_mutation=0.05,  # point mutation probability
                             p_point_replace=0.05,  # probability that each node is replaced during point mutation
                             max_samples=1.0,  # fraction of samples drawn from X to evaluate each program on
                             feature_names=feature_names, warm_start=False, low_memory=False,
                             n_jobs=1,
                             verbose=1,
                             random_state=random_state)

# Function set
function_set = ['add', 'sub', 'mul', 'div', 'sqrt', 'log',  # functions used to build and evolve formulas
                'abs', 'neg', 'inv', 'sin', 'cos', 'tan', 'max', 'min',
                # 'if', 'gtpn', 'andpn', 'orpn', 'ltpn', 'gtp', 'andp', 'orp', 'ltp', 'gtn', 'andn', 'orn', 'ltn', 'delayy', 'delta', 'signedpower', 'decayl', 'stdd', 'rankk'
                ]  # the commented-out line holds our own custom functions; for now results are better without them

my_cmodel_gp = my_gplearn(function_set, score_func_basic, random_state=0,
                          feature_names=feature_names)  # change random_state to generate different factors
my_cmodel_gp.fit(train_data.loc[:, :'rank_num'].values, train_data.loc[:, 'y'].values)
print(my_cmodel_gp)

# Strategy results on the test set
factor = my_cmodel_gp.predict(test_data.values)
bt_test = SymbolicTestor(test_data, transact_base='PreClose', commissions=(comm[0], comm[1]))  # same backtest settings as for training
bt_test.run_(factor=factor)
md = bt_test.summary()
print(md.out_stats)
print(bt_test.fees_factor)
md.plot_(comm=comm, show_bool=True)

On minute bars, if no fees are applied, the results are quite good: 81% annualized return and a 10% maximum drawdown.


Just run gpquant_demo.py directly.

