0.9.49 Add return characteristics analysis for event factors
zengbin93 committed Apr 30, 2024
1 parent b6699c2 commit e1733ee
Showing 3 changed files with 16 additions and 71 deletions.
1 change: 1 addition & 0 deletions czsc/__init__.py
@@ -135,6 +135,7 @@
show_optuna_study,
show_drawdowns,
show_rolling_daily_performance,
+ show_event_return,
)

from czsc.utils.bi_info import (
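The only change to czsc/__init__.py re-exports the new helper at the package root, alongside the other show_* functions in this import block. A minimal sketch that just confirms the export; the function's own signature is not shown in this diff, so it is not called here:

import czsc

# Available from the package root as of 0.9.49.
assert hasattr(czsc, "show_event_return")
print(czsc.show_event_return)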
57 changes: 0 additions & 57 deletions czsc/objects.py
@@ -913,63 +913,6 @@ def pairs(self):

return pairs

- @deprecated(version="1.0.0", reason="请使用 czsc.utils.stats.evaluate_pairs")
- def evaluate_pairs(self, trade_dir: str = "多空") -> dict:
- """评估交易表现
- :param trade_dir: 交易方向,可选值 ['多头', '空头', '多空']
- :return: 交易表现
- """
- if trade_dir == "多空":
- pairs = self.pairs
- else:
- pairs = [x for x in self.pairs if x["交易方向"] == trade_dir]
- p = {
- "交易标的": self.symbol,
- "策略标记": self.name,
- "交易方向": trade_dir,
- "交易次数": len(pairs),
- "累计收益": 0,
- "单笔收益": 0,
- "盈利次数": 0,
- "累计盈利": 0,
- "单笔盈利": 0,
- "亏损次数": 0,
- "累计亏损": 0,
- "单笔亏损": 0,
- "胜率": 0,
- "累计盈亏比": 0,
- "单笔盈亏比": 0,
- "盈亏平衡点": 1,
- }
-
- if len(pairs) == 0:
- return p
-
- p["盈亏平衡点"] = round(cal_break_even_point([x["盈亏比例"] for x in pairs]), 4)
- p["累计收益"] = round(sum([x["盈亏比例"] for x in pairs]), 2)
- p["单笔收益"] = round(p["累计收益"] / p["交易次数"], 2)
- p["平均持仓天数"] = round(sum([x["持仓天数"] for x in pairs]) / len(pairs), 2)
- p["平均持仓K线数"] = round(sum([x["持仓K线数"] for x in pairs]) / len(pairs), 2)
-
- win_ = [x for x in pairs if x["盈亏比例"] >= 0]
- if len(win_) > 0:
- p["盈利次数"] = len(win_)
- p["累计盈利"] = sum([x["盈亏比例"] for x in win_])
- p["单笔盈利"] = round(p["累计盈利"] / p["盈利次数"], 4)
- p["胜率"] = round(p["盈利次数"] / p["交易次数"], 4)
-
- loss_ = [x for x in pairs if x["盈亏比例"] < 0]
- if len(loss_) > 0:
- p["亏损次数"] = len(loss_)
- p["累计亏损"] = sum([x["盈亏比例"] for x in loss_])
- p["单笔亏损"] = round(p["累计亏损"] / p["亏损次数"], 4)
-
- p["累计盈亏比"] = round(p["累计盈利"] / abs(p["累计亏损"]), 4)
- p["单笔盈亏比"] = round(p["单笔盈利"] / abs(p["单笔亏损"]), 4)
-
- return p

def evaluate_holds(self, trade_dir: str = "多空") -> dict:
"""按持仓信号评估交易表现
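The removed evaluate_pairs method was already marked deprecated in favor of czsc.utils.stats.evaluate_pairs. A hedged migration sketch, assuming the replacement accepts the pairs records shown above (whether it expects a DataFrame or a list of dicts is not visible in this diff; a DataFrame is used here) and the same trade_dir argument:

import pandas as pd
from czsc.utils.stats import evaluate_pairs

# `trader` stands in for any object exposing the `pairs` property above; it is
# hypothetical, as is the assumption that the replacement returns the same keys.
pairs_df = pd.DataFrame(trader.pairs)
stats = evaluate_pairs(pairs_df, trade_dir="多空")
print(stats.get("胜率"), stats.get("累计盈亏比"))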
29 changes: 15 additions & 14 deletions czsc/utils/cache.py
@@ -15,10 +15,10 @@
import pandas as pd
from pathlib import Path
from loguru import logger
- from typing import Any
+ from typing import Any, Union, AnyStr


- home_path = Path(os.environ.get("CZSC_HOME", os.path.join(os.path.expanduser("~"), '.czsc')))
+ home_path = Path(os.environ.get("CZSC_HOME", os.path.join(os.path.expanduser("~"), ".czsc")))
home_path.mkdir(parents=True, exist_ok=True)
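As the line above shows, the cache root is taken from the CZSC_HOME environment variable and falls back to ~/.czsc. A small sketch of overriding it; the directory name is an arbitrary example, and the variable must be set before czsc is imported because home_path is resolved at import time:

import os

os.environ["CZSC_HOME"] = "/tmp/czsc_cache_demo"  # example path, not prescribed by the library

import czsc  # noqa: E402 (imported after setting the variable on purpose)
from czsc.utils.cache import home_path

print(home_path)  # /tmp/czsc_cache_demo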


@@ -88,13 +88,13 @@ def get(self, k: str, suffix: str = "pkl") -> Any:
return None

if suffix == "pkl":
- res = dill.load(open(file, 'rb'))
+ res = dill.load(open(file, "rb"))
elif suffix == "json":
- res = json.load(open(file, 'r', encoding='utf-8'))
+ res = json.load(open(file, "r", encoding="utf-8"))
elif suffix == "txt":
- res = file.read_text(encoding='utf-8')
+ res = file.read_text(encoding="utf-8")
elif suffix == "csv":
- res = pd.read_csv(file, encoding='utf-8')
+ res = pd.read_csv(file, encoding="utf-8")
elif suffix == "xlsx":
res = pd.read_excel(file)
elif suffix == "feather":
@@ -117,24 +117,24 @@ def set(self, k: str, v: Any, suffix: str = "pkl"):
logger.info(f"缓存文件 {file} 将被覆盖")

if suffix == "pkl":
- dill.dump(v, open(file, 'wb'))
+ dill.dump(v, open(file, "wb"))

elif suffix == "json":
if not isinstance(v, dict):
raise ValueError("suffix json only support dict")
- json.dump(v, open(file, 'w', encoding='utf-8'), ensure_ascii=False, indent=4)
+ json.dump(v, open(file, "w", encoding="utf-8"), ensure_ascii=False, indent=4)

elif suffix == "txt":
if not isinstance(v, str):
raise ValueError("suffix txt only support str")
- file.write_text(v, encoding='utf-8')
+ file.write_text(v, encoding="utf-8")

elif suffix == "csv":
if not isinstance(v, pd.DataFrame):
raise ValueError("suffix csv only support pd.DataFrame")
- v.to_csv(file, index=False, encoding='utf-8')
+ v.to_csv(file, index=False, encoding="utf-8")

- elif suffix == 'xlsx':
+ elif suffix == "xlsx":
if not isinstance(v, pd.DataFrame):
raise ValueError("suffix xlsx only support pd.DataFrame")
v.to_excel(file, index=False)
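A short usage sketch of the DiskCache methods touched in this hunk; the class, method names, and suffix behaviour are taken from the diff, while the "demo" subdirectory and the stored value are made up for illustration:

from czsc.utils.cache import DiskCache, home_path

dc = DiskCache(path=home_path / "demo")
dc.set("demo_params", {"lookback": 20}, suffix="json")  # json only accepts dict, per the check above
params = dc.get("demo_params", suffix="json")
print(params)  # {'lookback': 20}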
@@ -160,13 +160,14 @@ def remove(self, k: str, suffix: str = "pkl"):
Path.unlink(file) if Path.exists(file) else None


- def disk_cache(path: str = home_path, suffix: str = "pkl", ttl: int = -1):
+ def disk_cache(path: Union[AnyStr, Path] = home_path, suffix: str = "pkl", ttl: int = -1):
"""缓存装饰器,支持多种数据格式
:param path: 缓存文件夹父路径,默认为 home_path,每个函数的缓存文件夹为 path/func_name
:param suffix: 缓存文件后缀,支持 pkl, json, txt, csv, xlsx, feather, parquet
:param ttl: 缓存文件有效期,单位:秒
"""

def decorator(func):
nonlocal path
_c = DiskCache(path=Path(path) / func.__name__)
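A hedged sketch of the disk_cache decorator whose path annotation is widened here; the decorated function, its argument, and the ttl value are invented for illustration:

import pandas as pd
from czsc.utils.cache import disk_cache

@disk_cache(suffix="feather", ttl=3600)  # cache results on disk for up to an hour
def load_daily_bars(symbol: str) -> pd.DataFrame:
    # placeholder for an expensive data load
    return pd.DataFrame({"symbol": [symbol], "close": [10.0]})

df1 = load_daily_bars("000001.SZ")  # computed, then written under path/load_daily_bars
df2 = load_daily_bars("000001.SZ")  # same code and arguments, served from the cache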
@@ -177,7 +178,7 @@ def cached_func(*args, **kwargs):

hash_str = f"{func.__name__}{args}{kwargs}"
code_str = inspect.getsource(func)
- k = hashlib.md5((code_str + hash_str).encode('utf-8')).hexdigest().upper()[:8]
+ k = hashlib.md5((code_str + hash_str).encode("utf-8")).hexdigest().upper()[:8]
k = f"{k}_{func.__name__}"

if _c.is_found(k, suffix=suffix, ttl=ttl1):
@@ -194,7 +195,7 @@ def cached_func(*args, **kwargs):
return decorator


- def clear_cache(path=home_path, subs=None, recreate=False):
+ def clear_cache(path: Union[AnyStr, Path] = home_path, subs=None, recreate=False):
"""清空缓存文件夹
:param path: 缓存文件夹路径
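clear_cache gains the same Union[AnyStr, Path] annotation as disk_cache. A minimal sketch; the subs and recreate parameters are visible in the signature but their docs are collapsed in this diff, so they are left at their defaults:

from czsc.utils.cache import clear_cache, home_path

# Wipe the local cache root; pass a different path to clear another cache directory.
clear_cache(path=home_path)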
