# `LOG_CHAT`: Whether to log chat requests and responses
LOG_CHAT=true
#PRINT_CHAT=true
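# `CACHE_CHAT_COMPLETION`: Whether to cache chat-completion responses (a descriptive note inferred from the option name)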
CACHE_CHAT_COMPLETION=true
# `CACHE_BACKEND`: Options (MEMORY, LMDB, LevelDB)
CACHE_BACKEND=LMDB
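# `BENCHMARK_MODE`: Enable the built-in benchmark routes (e.g. the /benchmark path rate-limited below; inferred from the option name)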
BENCHMARK_MODE=true
# `OPENAI_BASE_URL`: Forward to any OpenAI-style service; multiple addresses may be specified, separated by commas.
# If more than one is specified, no OPENAI_ROUTE_PREFIX or EXTRA_ROUTE_PREFIX may be the root route `/`.
OPENAI_BASE_URL='https://api.openai.com, http://localhost:8080'
# `OPENAI_ROUTE_PREFIX`: Forwarding route prefix for each OpenAI-style service above (also used for logging), one per base URL, in order
OPENAI_ROUTE_PREFIX='/openai, /localai'
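# With the values above, requests under /openai are forwarded to https://api.openai.com
# and requests under /localai to http://localhost:8080.
# `CHAT_COMPLETION_ROUTE` / `COMPLETION_ROUTE`: Paths treated as the (chat) completion endpoints
# (presumably for the caching options above; inferred from the option names)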
CHAT_COMPLETION_ROUTE=/openai/v1/chat/completions
COMPLETION_ROUTE=/v1/completions
# `OPENAI_API_KEY`: OpenAI-style API keys; multiple keys are supported, separated by commas
OPENAI_API_KEY='sk-***, sk-***, ***'
# `FORWARD_KEY`: Once OPENAI_API_KEY is set above, FORWARD_KEY defines custom proxy keys that clients
# use as their API key when calling the forward service; multiple keys may be separated by commas
FORWARD_KEY='fk-***, ***, 123'
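# Example client call (a sketch; assumes the forward service listens at http://localhost:8000
# and '/openai' is one of the OPENAI_ROUTE_PREFIX values above):
#
#   from openai import OpenAI
#   client = OpenAI(api_key="fk-***", base_url="http://localhost:8000/openai/v1")
#   resp = client.chat.completions.create(
#       model="gpt-3.5-turbo",
#       messages=[{"role": "user", "content": "hello"}],
#   )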
# `EXTRA_BASE_URL`: Forward any additional services
EXTRA_BASE_URL='http://localhost:8882, http://localhost:8881'
# `EXTRA_ROUTE_PREFIX`: Route prefixes matching EXTRA_BASE_URL, one per address, in order
EXTRA_ROUTE_PREFIX='/tts, /translate'
# `REQ_RATE_LIMIT`: Per-user request rate limits for the specified routes
# Format: {route: ratelimit-string}
# ratelimit-string format: [count] [per|/] [n (optional)] [second|minute|hour|day|month|year]
# ref: https://limits.readthedocs.io/en/stable/quickstart.html#rate-limit-string-notation
REQ_RATE_LIMIT='{
"/openai/v1/chat/completions": "1/10seconds",
"/localai/v1/chat/completions": "2/second"
}'
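# With the values above, each user may send 1 request per 10 seconds to
# /openai/v1/chat/completions and 2 requests per second to /localai/v1/chat/completions.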
# Backend for rate limiting: [memory, redis, memcached, ...] ref: https://limits.readthedocs.io/en/stable/storage.html
# Assuming your Redis service is at localhost:6379
REQ_RATE_LIMIT_BACKEND="redis://localhost:6379"
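# Alternatively, the `limits` library's in-process memory storage URI can be used:
#REQ_RATE_LIMIT_BACKEND="memory://"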
# `GLOBAL_RATE_LIMIT`: Rate limit for all routes not listed in `REQ_RATE_LIMIT`; leave unset for no limit (default)
GLOBAL_RATE_LIMIT=inf
# `RATE_LIMIT_STRATEGY`: Options: (fixed-window, fixed-window-elastic-expiry, moving-window)
# ref: https://limits.readthedocs.io/en/latest/strategies.html
# `fixed-window`: most memory-efficient strategy; `moving-window`: most effective at preventing bursts, but higher memory cost
RATE_LIMIT_STRATEGY=fixed-window
# `PROXY`: HTTP proxy address
PROXY=http://localhost:7890
# `TOKEN_RATE_LIMIT`: Rate limit on the tokens of each streaming response
# (note: a "token" here is not strictly a GPT token, but an SSE chunk)
TOKEN_RATE_LIMIT='{
"/v1/chat/completions": "20/second",
"/benchmark/v1/chat/completions": "500/second"
}'
# TCP connection timeout duration (in seconds)
TIMEOUT=10
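# `ITER_CHUNK_TYPE`: Streaming chunk iteration mode. Options: (efficiency, one-by-one)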
ITER_CHUNK_TYPE=efficiency
#ITER_CHUNK_TYPE=one-by-one
# Set timezone
TZ=Asia/Shanghai